/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound, ASIO and WASAPI) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2017 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/

// RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
58 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
59 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
60 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
61 #define MUTEX_LOCK(A) EnterCriticalSection(A)
62 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
66 static std::string convertCharPointerToStdString(const char *text)
68 return std::string(text);
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
79 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
81 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
82 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
83 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
84 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
86 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
87 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
// *************************************************** //
//
// RtAudio definitions.
//
// *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_PULSE__)
111 apis.push_back( LINUX_PULSE );
113 #if defined(__LINUX_ALSA__)
114 apis.push_back( LINUX_ALSA );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
136 void RtAudio :: openRtApi( RtAudio::Api api )
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
180 RtAudio :: RtAudio( RtAudio::Api api )
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
187 if ( rtapi_ ) return;
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
203 if ( rtapi_ ) return;
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll thow an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
213 RtAudio :: ~RtAudio()
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
// *************************************************** //
//
// Public RtApi definitions (see end of file for
// private or protected utility functions).
//
// *************************************************** //
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
253 MUTEX_DESTROY( &stream_.mutex );
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
257 RtAudio::StreamParameters *iParams,
258 RtAudioFormat format, unsigned int sampleRate,
259 unsigned int *bufferFrames,
260 RtAudioCallback callback, void *userData,
261 RtAudio::StreamOptions *options,
262 RtAudioErrorCallback errorCallback )
264 if ( stream_.state != STREAM_CLOSED ) {
265 errorText_ = "RtApi::openStream: a stream is already open!";
266 error( RtAudioError::INVALID_USE );
270 // Clear stream information potentially left from a previously open stream.
273 if ( oParams && oParams->nChannels < 1 ) {
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
275 error( RtAudioError::INVALID_USE );
279 if ( iParams && iParams->nChannels < 1 ) {
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
281 error( RtAudioError::INVALID_USE );
285 if ( oParams == NULL && iParams == NULL ) {
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
287 error( RtAudioError::INVALID_USE );
291 if ( formatBytes(format) == 0 ) {
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
293 error( RtAudioError::INVALID_USE );
297 unsigned int nDevices = getDeviceCount();
298 unsigned int oChannels = 0;
300 oChannels = oParams->nChannels;
301 if ( oParams->deviceId >= nDevices ) {
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
303 error( RtAudioError::INVALID_USE );
308 unsigned int iChannels = 0;
310 iChannels = iParams->nChannels;
311 if ( iParams->deviceId >= nDevices ) {
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
313 error( RtAudioError::INVALID_USE );
320 if ( oChannels > 0 ) {
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
323 sampleRate, format, bufferFrames, options );
324 if ( result == false ) {
325 error( RtAudioError::SYSTEM_ERROR );
330 if ( iChannels > 0 ) {
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
333 sampleRate, format, bufferFrames, options );
334 if ( result == false ) {
335 if ( oChannels > 0 ) closeStream();
336 error( RtAudioError::SYSTEM_ERROR );
341 stream_.callbackInfo.callback = (void *) callback;
342 stream_.callbackInfo.userData = userData;
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
346 stream_.state = STREAM_STOPPED;
349 unsigned int RtApi :: getDefaultInputDevice( void )
351 // Should be implemented in subclasses if possible.
355 unsigned int RtApi :: getDefaultOutputDevice( void )
357 // Should be implemented in subclasses if possible.
361 void RtApi :: closeStream( void )
363 // MUST be implemented in subclasses!
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
370 RtAudio::StreamOptions * /*options*/ )
372 // MUST be implemented in subclasses!
376 void RtApi :: tickStreamTime( void )
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
389 long RtApi :: getStreamLatency( void )
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
402 double RtApi :: getStreamTime( void )
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
413 return stream_.streamTime;
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
421 return stream_.streamTime;
425 void RtApi :: setStreamTime( double time )
430 stream_.streamTime = time;
431 #if defined( HAVE_GETTIMEOFDAY )
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
436 unsigned int RtApi :: getStreamSampleRate( void )
440 return stream_.sampleRate;
// *************************************************** //
//
// OS/API-specific methods.
//
// *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
467 // A structure to hold various information related to the CoreAudio API
470 AudioDeviceID id[2]; // device ids
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
472 AudioDeviceIOProcID procId[2];
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
475 UInt32 nStreams[2]; // number of streams to use
478 pthread_cond_t condition;
479 int drainCounter; // Tracks callback counts when draining
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
486 RtApiCore:: RtApiCore()
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
489 // This is a largely undocumented but absolutely necessary
490 // requirement starting with OS-X 10.6. If not called, queries and
491 // updates to various audio device properties are not handled
493 CFRunLoopRef theRunLoop = NULL;
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
495 kAudioObjectPropertyScopeGlobal,
496 kAudioObjectPropertyElementMaster };
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
498 if ( result != noErr ) {
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
500 error( RtAudioError::WARNING );
505 RtApiCore :: ~RtApiCore()
507 // The subclass destructor gets called before the base class
508 // destructor, so close an existing stream before deallocating
509 // apiDeviceId memory.
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
513 unsigned int RtApiCore :: getDeviceCount( void )
515 // Find out how many audio devices there are, if any.
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
519 if ( result != noErr ) {
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
521 error( RtAudioError::WARNING );
525 return dataSize / sizeof( AudioDeviceID );
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
530 unsigned int nDevices = getDeviceCount();
531 if ( nDevices <= 1 ) return 0;
534 UInt32 dataSize = sizeof( AudioDeviceID );
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
537 if ( result != noErr ) {
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
539 error( RtAudioError::WARNING );
543 dataSize *= nDevices;
544 AudioDeviceID deviceList[ nDevices ];
545 property.mSelector = kAudioHardwarePropertyDevices;
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
547 if ( result != noErr ) {
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
549 error( RtAudioError::WARNING );
553 for ( unsigned int i=0; i<nDevices; i++ )
554 if ( id == deviceList[i] ) return i;
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
557 error( RtAudioError::WARNING );
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
563 unsigned int nDevices = getDeviceCount();
564 if ( nDevices <= 1 ) return 0;
567 UInt32 dataSize = sizeof( AudioDeviceID );
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
570 if ( result != noErr ) {
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
572 error( RtAudioError::WARNING );
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
577 AudioDeviceID deviceList[ nDevices ];
578 property.mSelector = kAudioHardwarePropertyDevices;
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
580 if ( result != noErr ) {
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
582 error( RtAudioError::WARNING );
586 for ( unsigned int i=0; i<nDevices; i++ )
587 if ( id == deviceList[i] ) return i;
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
590 error( RtAudioError::WARNING );
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
596 RtAudio::DeviceInfo info;
600 unsigned int nDevices = getDeviceCount();
601 if ( nDevices == 0 ) {
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
603 error( RtAudioError::INVALID_USE );
607 if ( device >= nDevices ) {
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
609 error( RtAudioError::INVALID_USE );
613 AudioDeviceID deviceList[ nDevices ];
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
616 kAudioObjectPropertyScopeGlobal,
617 kAudioObjectPropertyElementMaster };
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
619 0, NULL, &dataSize, (void *) &deviceList );
620 if ( result != noErr ) {
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
622 error( RtAudioError::WARNING );
626 AudioDeviceID id = deviceList[ device ];
628 // Get the device name.
631 dataSize = sizeof( CFStringRef );
632 property.mSelector = kAudioObjectPropertyManufacturer;
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
634 if ( result != noErr ) {
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
636 errorText_ = errorStream_.str();
637 error( RtAudioError::WARNING );
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
642 int length = CFStringGetLength(cfname);
643 char *mname = (char *)malloc(length * 3 + 1);
644 #if defined( UNICODE ) || defined( _UNICODE )
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
649 info.name.append( (const char *)mname, strlen(mname) );
650 info.name.append( ": " );
654 property.mSelector = kAudioObjectPropertyName;
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
656 if ( result != noErr ) {
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
658 errorText_ = errorStream_.str();
659 error( RtAudioError::WARNING );
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
664 length = CFStringGetLength(cfname);
665 char *name = (char *)malloc(length * 3 + 1);
666 #if defined( UNICODE ) || defined( _UNICODE )
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
671 info.name.append( (const char *)name, strlen(name) );
675 // Get the output stream "configuration".
676 AudioBufferList *bufferList = nil;
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
678 property.mScope = kAudioDevicePropertyScopeOutput;
679 // property.mElement = kAudioObjectPropertyElementWildcard;
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
682 if ( result != noErr || dataSize == 0 ) {
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
684 errorText_ = errorStream_.str();
685 error( RtAudioError::WARNING );
689 // Allocate the AudioBufferList.
690 bufferList = (AudioBufferList *) malloc( dataSize );
691 if ( bufferList == NULL ) {
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
693 error( RtAudioError::WARNING );
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
698 if ( result != noErr || dataSize == 0 ) {
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
701 errorText_ = errorStream_.str();
702 error( RtAudioError::WARNING );
706 // Get output channel information.
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
708 for ( i=0; i<nStreams; i++ )
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
712 // Get the input stream "configuration".
713 property.mScope = kAudioDevicePropertyScopeInput;
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
715 if ( result != noErr || dataSize == 0 ) {
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
717 errorText_ = errorStream_.str();
718 error( RtAudioError::WARNING );
722 // Allocate the AudioBufferList.
723 bufferList = (AudioBufferList *) malloc( dataSize );
724 if ( bufferList == NULL ) {
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
726 error( RtAudioError::WARNING );
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
731 if (result != noErr || dataSize == 0) {
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
734 errorText_ = errorStream_.str();
735 error( RtAudioError::WARNING );
739 // Get input channel information.
740 nStreams = bufferList->mNumberBuffers;
741 for ( i=0; i<nStreams; i++ )
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
745 // If device opens for both playback and capture, we determine the channels.
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
749 // Probe the device sample rates.
750 bool isInput = false;
751 if ( info.outputChannels == 0 ) isInput = true;
753 // Determine the supported sample rates.
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
765 AudioValueRange rangeList[ nRanges ];
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
767 if ( result != kAudioHardwareNoError ) {
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
769 errorText_ = errorStream_.str();
770 error( RtAudioError::WARNING );
774 // The sample rate reporting mechanism is a bit of a mystery. It
775 // seems that it can either return individual rates or a range of
776 // rates. I assume that if the min / max range values are the same,
777 // then that represents a single supported rate and if the min / max
778 // range values are different, the device supports an arbitrary
779 // range of values (though there might be multiple ranges, so we'll
780 // use the most conservative range).
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
782 bool haveValueRange = false;
783 info.sampleRates.clear();
784 for ( UInt32 i=0; i<nRanges; i++ ) {
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
787 info.sampleRates.push_back( tmpSr );
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
790 info.preferredSampleRate = tmpSr;
793 haveValueRange = true;
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
799 if ( haveValueRange ) {
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
805 info.preferredSampleRate = SAMPLE_RATES[k];
810 // Sort and remove any redundant values
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
814 if ( info.sampleRates.size() == 0 ) {
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
822 // Thus, any other "physical" formats supported by the device are of
823 // no interest to the client.
824 info.nativeFormats = RTAUDIO_FLOAT32;
826 if ( info.outputChannels > 0 )
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
828 if ( info.inputChannels > 0 )
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
836 const AudioTimeStamp* /*inNow*/,
837 const AudioBufferList* inInputData,
838 const AudioTimeStamp* /*inInputTime*/,
839 AudioBufferList* outOutputData,
840 const AudioTimeStamp* /*inOutputTime*/,
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
845 RtApiCore *object = (RtApiCore *) info->object;
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
847 return kAudioHardwareUnspecifiedError;
849 return kAudioHardwareNoError;
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
854 const AudioObjectPropertyAddress properties[],
855 void* handlePointer )
857 CoreHandle *handle = (CoreHandle *) handlePointer;
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
861 handle->xrun[1] = true;
863 handle->xrun[0] = true;
867 return kAudioHardwareNoError;
870 static OSStatus rateListener( AudioObjectID inDevice,
871 UInt32 /*nAddresses*/,
872 const AudioObjectPropertyAddress /*properties*/[],
875 Float64 *rate = (Float64 *) ratePointer;
876 UInt32 dataSize = sizeof( Float64 );
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
878 kAudioObjectPropertyScopeGlobal,
879 kAudioObjectPropertyElementMaster };
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
881 return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
1403 void RtApiCore :: closeStream( void )
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
1479 void RtApiCore :: startStream( void )
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
1520 void RtApiCore :: stopStream( void )
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
1563 void RtApiCore :: abortStream( void )
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
1589 pthread_exit( NULL );
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
// (Partial view: tail of the RtApiCore input-side callback handling; the
// function's opening lines are outside this chunk. Embedded numbers are the
// original file's line numbers; gaps mark lines elided from this listing.)
// Case: read from multiple mono (single-channel) CoreAudio streams.
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
// Each stream's mono buffer is copied to one de-interleaved channel slab.
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
// outOffset is the stride between successive channels of one frame in the
// destination: 1 when interleaved, a full buffer when non-interleaved.
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
// Frame-by-frame copy from the (interleaved) stream buffer into the output.
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
// Final format/interleave conversion into the user's buffer, if needed.
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream-time counter by one buffer of frames.
1848 RtApi::tickStreamTime();
// Map a CoreAudio OSStatus error code to its symbolic constant name, for
// inclusion in human-readable error messages. Unknown codes fall through to
// a generic string.
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1908 // .jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1915 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
// (The `struct JackHandle {` header line and the xrun flag member are among
// the lines elided from this listing.)
1935 jack_client_t *client;
// One array of registered JACK port handles per direction: [0]=output, [1]=input.
1936 jack_port_t **ports[2];
1937 std::string deviceName[2];
// Condition variable used by stopStream() to block until draining completes.
1939 pthread_cond_t condition;
1940 int drainCounter; // Tracks callback counts when draining
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default constructor: null/false everything so cleanup paths are safe.
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
// No-op JACK error handler installed in release builds to suppress JACK's
// default stderr error reporting (see the constructor below).
1947 #if !defined(__RTAUDIO_DEBUG__)
1948 static void jackSilentError( const char * ) {};
// Constructor: auto-connection of ports defaults to on; in non-debug builds
// JACK's internal error printing is silenced.
1951 RtApiJack :: RtApiJack()
1952 :shouldAutoconnect_(true) {
1953 // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955 // Turn off Jack's internal error reporting.
1956 jack_set_error_function( &jackSilentError );
// Destructor: make sure any open stream is closed before the API object dies.
1960 RtApiJack :: ~RtApiJack()
1962 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by opening a temporary client, enumerating all audio
// ports, and counting distinct client-name prefixes (the text before the
// first ':' in each port name). Returns 0 if the JACK server is not running.
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967 // See if we can become a jack client.
1968 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969 jack_status_t *status = NULL;
1970 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971 if ( client == 0 ) return 0;
1974 std::string port, previousPort;
1975 unsigned int nChannels = 0, nDevices = 0;
1976 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
1978 // Parse the port names up to the first colon (:).
1981 port = (char *) ports[ nChannels ];
1982 iColon = port.find(":");
1983 if ( iColon != std::string::npos ) {
// Keep the colon in the compared prefix so adjacent-duplicate detection works.
1984 port = port.substr( 0, iColon + 1 );
1985 if ( port != previousPort ) {
1987 previousPort = port;
1990 } while ( ports[++nChannels] );
// Temporary client is released before returning the count.
1994 jack_client_close( client );
// Probe one JACK "device" (a distinct client-name prefix among the server's
// ports): name, channel counts, the server's (single, fixed) sample rate,
// and native format. info.probed stays false on any failure; warnings are
// issued through error().
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000 RtAudio::DeviceInfo info;
2001 info.probed = false;
// A throw-away client connection is needed just to query the server.
2003 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004 jack_status_t *status = NULL;
2005 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006 if ( client == 0 ) {
2007 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008 error( RtAudioError::WARNING );
// Walk all ports and pick the device-th distinct client-name prefix.
2013 std::string port, previousPort;
2014 unsigned int nPorts = 0, nDevices = 0;
2015 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2017 // Parse the port names up to the first colon (:).
2020 port = (char *) ports[ nPorts ];
2021 iColon = port.find(":");
2022 if ( iColon != std::string::npos ) {
2023 port = port.substr( 0, iColon );
2024 if ( port != previousPort ) {
2025 if ( nDevices == device ) info.name = port;
2027 previousPort = port;
2030 } while ( ports[++nPorts] );
2034 if ( device >= nDevices ) {
2035 jack_client_close( client );
2036 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037 error( RtAudioError::INVALID_USE );
2041 // Get the current jack server sample rate.
2042 info.sampleRates.clear();
// JACK runs at exactly one rate, fixed at server start; report only that.
2044 info.preferredSampleRate = jack_get_sample_rate( client );
2045 info.sampleRates.push_back( info.preferredSampleRate );
2047 // Count the available ports containing the client name as device
2048 // channels. Jack "input ports" equal RtAudio output channels.
2049 unsigned int nChannels = 0;
2050 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2052 while ( ports[ nChannels ] ) nChannels++;
2054 info.outputChannels = nChannels;
2057 // Jack "output ports" equal RtAudio input channels.
2059 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2061 while ( ports[ nChannels ] ) nChannels++;
2063 info.inputChannels = nChannels;
2066 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067 jack_client_close(client);
2068 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069 error( RtAudioError::WARNING );
2073 // If device opens for both playback and capture, we determine the channels.
2074 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077 // Jack always uses 32-bit floats.
2078 info.nativeFormats = RTAUDIO_FLOAT32;
2080 // Jack doesn't provide default devices so we'll use the first available one.
2081 if ( device == 0 && info.outputChannels > 0 )
2082 info.isDefaultOutput = true;
2083 if ( device == 0 && info.inputChannels > 0 )
2084 info.isDefaultInput = true;
2086 jack_client_close(client);
// JACK process callback: forwards each block of nframes to the stream's
// RtApiJack object. Returning non-zero tells JACK to stop calling us.
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093 CallbackInfo *info = (CallbackInfo *) infoPointer;
2095 RtApiJack *object = (RtApiJack *) info->object;
2096 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
2105 static void *jackCloseStream( void *ptr )
2107 CallbackInfo *info = (CallbackInfo *) ptr;
2108 RtApiJack *object = (RtApiJack *) info->object;
// Close on this helper thread so the JACK shutdown callback can return.
2110 object->closeStream();
2112 pthread_exit( NULL );
// JACK server-shutdown callback: if the stream is actually running (i.e. this
// is not the echo of our own deactivate), close it from a detached-style
// helper thread and inform the user on stderr.
2114 static void jackShutdown( void *infoPointer )
2116 CallbackInfo *info = (CallbackInfo *) infoPointer;
2117 RtApiJack *object = (RtApiJack *) info->object;
2119 // Check current stream state. If stopped, then we'll assume this
2120 // was called as a result of a call to RtApiJack::stopStream (the
2121 // deactivation of a client handle causes this function to be called).
2122 // If not, we'll assume the Jack server is shutting down or some
2123 // other problem occurred and we should close the stream.
2124 if ( object->isStreamRunning() == false ) return;
2126 ThreadHandle threadId;
2127 pthread_create( &threadId, NULL, jackCloseStream, info );
2128 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: record an over/underflow flag for whichever directions
// are open; the flags are reported to the user callback and then cleared.
2131 static int jackXrun( void *infoPointer )
2133 JackHandle *handle = *((JackHandle **) infoPointer);
2135 if ( handle->ports[0] ) handle->xrun[0] = true;
2136 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a JACK stream: connect as a client
// (reusing the existing client for the second half of a duplex open), verify
// the requested channels and sample rate against the server, allocate the
// JackHandle / user / device buffers, register our ports, and install the
// process, xrun and shutdown callbacks. Returns FAILURE (via the common
// error-unwind code at the end) on any problem; SUCCESS otherwise.
// (Embedded numbers are original file line numbers; gaps are elided lines.)
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142 unsigned int firstChannel, unsigned int sampleRate,
2143 RtAudioFormat format, unsigned int *bufferSize,
2144 RtAudio::StreamOptions *options )
2146 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148 // Look for jack server and try to become a client (only do once per stream).
2149 jack_client_t *client = 0;
2150 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152 jack_status_t *status = NULL;
2153 if ( options && !options->streamName.empty() )
2154 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156 client = jack_client_open( "RtApiJack", jackoptions, status );
2157 if ( client == 0 ) {
2158 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159 error( RtAudioError::WARNING );
2164 // The handle must have been created on an earlier pass.
2165 client = handle->client;
// Resolve the device index to a client-name prefix, as in getDeviceInfo().
2169 std::string port, previousPort, deviceName;
2170 unsigned int nPorts = 0, nDevices = 0;
2171 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2173 // Parse the port names up to the first colon (:).
2176 port = (char *) ports[ nPorts ];
2177 iColon = port.find(":");
2178 if ( iColon != std::string::npos ) {
2179 port = port.substr( 0, iColon );
2180 if ( port != previousPort ) {
2181 if ( nDevices == device ) deviceName = port;
2183 previousPort = port;
2186 } while ( ports[++nPorts] );
2190 if ( device >= nDevices ) {
2191 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// JACK input ports are our playback channels; output ports our capture channels.
2195 unsigned long flag = JackPortIsInput;
2196 if ( mode == INPUT ) flag = JackPortIsOutput;
2198 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2199 // Count the available ports containing the client name as device
2200 // channels. Jack "input ports" equal RtAudio output channels.
2201 unsigned int nChannels = 0;
2202 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2204 while ( ports[ nChannels ] ) nChannels++;
2207 // Compare the jack ports for specified client to the requested number of channels.
2208 if ( nChannels < (channels + firstChannel) ) {
2209 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2210 errorText_ = errorStream_.str();
2215 // Check the jack server sample rate.
2216 unsigned int jackRate = jack_get_sample_rate( client );
2217 if ( sampleRate != jackRate ) {
// The server rate is fixed; a mismatch is fatal for this open attempt.
2218 jack_client_close( client );
2219 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2220 errorText_ = errorStream_.str();
2223 stream_.sampleRate = jackRate;
2225 // Get the latency of the JACK port.
2226 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2227 if ( ports[ firstChannel ] ) {
2229 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2230 // the range (usually the min and max are equal)
2231 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2232 // get the latency range
2233 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2234 // be optimistic, use the min!
2235 stream_.latency[mode] = latrange.min;
2236 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2240 // The jack server always uses 32-bit floating-point data.
2241 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2242 stream_.userFormat = format;
2244 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2245 else stream_.userInterleaved = true;
2247 // Jack always uses non-interleaved buffers.
2248 stream_.deviceInterleaved[mode] = false;
2250 // Jack always provides host byte-ordered data.
2251 stream_.doByteSwap[mode] = false;
2253 // Get the buffer size. The buffer size and number of buffers
2254 // (periods) is set when the jack server is started.
2255 stream_.bufferSize = (int) jack_get_buffer_size( client );
2256 *bufferSize = stream_.bufferSize;
2258 stream_.nDeviceChannels[mode] = channels;
2259 stream_.nUserChannels[mode] = channels;
2261 // Set flags for buffer conversion.
2262 stream_.doConvertBuffer[mode] = false;
2263 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2264 stream_.doConvertBuffer[mode] = true;
2265 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2266 stream_.nUserChannels[mode] > 1 )
2267 stream_.doConvertBuffer[mode] = true;
2269 // Allocate our JackHandle structure for the stream.
2270 if ( handle == 0 ) {
2272 handle = new JackHandle;
2274 catch ( std::bad_alloc& ) {
2275 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2279 if ( pthread_cond_init(&handle->condition, NULL) ) {
2280 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2283 stream_.apiHandle = (void *) handle;
2284 handle->client = client;
2286 handle->deviceName[mode] = deviceName;
2288 // Allocate necessary internal buffers.
2289 unsigned long bufferBytes;
2290 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2291 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2292 if ( stream_.userBuffer[mode] == NULL ) {
2293 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2297 if ( stream_.doConvertBuffer[mode] ) {
// For duplex, keep the larger of the two possible device buffers and reuse it.
2299 bool makeBuffer = true;
2300 if ( mode == OUTPUT )
2301 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2302 else { // mode == INPUT
2303 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2304 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2305 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2306 if ( bufferBytes < bytesOut ) makeBuffer = false;
2311 bufferBytes *= *bufferSize;
2312 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2313 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2314 if ( stream_.deviceBuffer == NULL ) {
2315 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2321 // Allocate memory for the Jack ports (channels) identifiers.
2322 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2323 if ( handle->ports[mode] == NULL ) {
2324 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2328 stream_.device[mode] = device;
2329 stream_.channelOffset[mode] = firstChannel;
2330 stream_.state = STREAM_STOPPED;
2331 stream_.callbackInfo.object = (void *) this;
2333 if ( stream_.mode == OUTPUT && mode == INPUT )
2334 // We had already set up the stream for output.
2335 stream_.mode = DUPLEX;
2337 stream_.mode = mode;
// Install the three JACK callbacks against our client handle.
2338 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2339 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2340 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2343 // Register our ports.
2345 if ( mode == OUTPUT ) {
2346 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2347 snprintf( label, 64, "outport %d", i );
2348 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2349 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2353 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2354 snprintf( label, 64, "inport %d", i );
2355 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2356 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2360 // Setup the buffer conversion information structure. We don't use
2361 // buffers to do channel offsets, so we override that parameter
2363 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2365 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error-unwind path (reached via goto on failures above): tear down the
// handle, free any buffers allocated so far, and leave the stream closed.
2371 pthread_cond_destroy( &handle->condition );
2372 jack_client_close( handle->client );
2374 if ( handle->ports[0] ) free( handle->ports[0] );
2375 if ( handle->ports[1] ) free( handle->ports[1] );
2378 stream_.apiHandle = 0;
2381 for ( int i=0; i<2; i++ ) {
2382 if ( stream_.userBuffer[i] ) {
2383 free( stream_.userBuffer[i] );
2384 stream_.userBuffer[i] = 0;
2388 if ( stream_.deviceBuffer ) {
2389 free( stream_.deviceBuffer );
2390 stream_.deviceBuffer = 0;
// Close the stream: deactivate (if running) and close the JACK client, free
// the handle, port arrays and audio buffers, and reset the stream state.
2396 void RtApiJack :: closeStream( void )
2398 if ( stream_.state == STREAM_CLOSED ) {
2399 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2400 error( RtAudioError::WARNING );
2404 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2407 if ( stream_.state == STREAM_RUNNING )
2408 jack_deactivate( handle->client );
2410 jack_client_close( handle->client );
2414 if ( handle->ports[0] ) free( handle->ports[0] );
2415 if ( handle->ports[1] ) free( handle->ports[1] );
2416 pthread_cond_destroy( &handle->condition );
2418 stream_.apiHandle = 0;
2421 for ( int i=0; i<2; i++ ) {
2422 if ( stream_.userBuffer[i] ) {
2423 free( stream_.userBuffer[i] );
2424 stream_.userBuffer[i] = 0;
2428 if ( stream_.deviceBuffer ) {
2429 free( stream_.deviceBuffer );
2430 stream_.deviceBuffer = 0;
2433 stream_.mode = UNINITIALIZED;
2434 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and, unless autoconnect was
// disabled (RTAUDIO_JACK_DONT_CONNECT), wire our registered ports to the
// target device's ports, honoring the stored channel offsets.
2437 void RtApiJack :: startStream( void )
2440 if ( stream_.state == STREAM_RUNNING ) {
2441 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2442 error( RtAudioError::WARNING );
2446 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2447 int result = jack_activate( handle->client );
2449 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2455 // Get the list of available ports.
2456 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2458 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2459 if ( ports == NULL) {
2460 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2464 // Now make the port connections. Since RtAudio wasn't designed to
2465 // allow the user to select particular channels of a device, we'll
2466 // just open the first "nChannels" ports with offset.
2467 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2469 if ( ports[ stream_.channelOffset[0] + i ] )
2470 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2473 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2480 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2482 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2483 if ( ports == NULL) {
2484 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2488 // Now make the port connections. See note above.
2489 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2491 if ( ports[ stream_.channelOffset[1] + i ] )
2492 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2495 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping for the new run before marking the stream RUNNING.
2502 handle->drainCounter = 0;
2503 handle->internalDrain = false;
2504 stream_.state = STREAM_RUNNING;
2507 if ( result == 0 ) return;
2508 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for playback, arm the drain counter and block
// on the handle's condition variable until the process callback signals that
// output has been zero-filled; then deactivate the client.
2511 void RtApiJack :: stopStream( void )
2514 if ( stream_.state == STREAM_STOPPED ) {
2515 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2516 error( RtAudioError::WARNING );
2520 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2521 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2523 if ( handle->drainCounter == 0 ) {
2524 handle->drainCounter = 2;
2525 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2529 jack_deactivate( handle->client );
2530 stream_.state = STREAM_STOPPED;
// Abort the stream: set the drain counter so the callback starts writing
// zeros immediately, then fall through to the normal stop path (the elided
// tail of this function calls stopStream()).
2533 void RtApiJack :: abortStream( void )
2536 if ( stream_.state == STREAM_STOPPED ) {
2537 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2538 error( RtAudioError::WARNING );
2542 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2543 handle->drainCounter = 2;
2548 // This function will be called by a spawned thread when the user
2549 // callback function signals that the stream should be stopped or
2550 // aborted. It is necessary to handle it this way because the
2551 // callbackEvent() function must return before the jack_deactivate()
2552 // function will return.
// Helper thread entry point: stops the stream outside the JACK process
// callback, because stopStream() -> jack_deactivate() cannot complete until
// callbackEvent() has returned.
2553 static void *jackStopStream( void *ptr )
2555 CallbackInfo *info = (CallbackInfo *) ptr;
2556 RtApiJack *object = (RtApiJack *) info->object;
2558 object->stopStream();
2559 pthread_exit( NULL );
// Per-block JACK processing: run the user callback, then move audio between
// the user/device buffers and the per-channel JACK port buffers, handling
// format conversion and drain-to-silence on stop/abort.
2562 bool RtApiJack :: callbackEvent( unsigned long nframes )
2564 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2565 if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): message says "RtApiCore" inside RtApiJack — copy-paste
// artifact; should read "RtApiJack::callbackEvent()". (String left unchanged
// here; fixing it is a behavior-affecting edit.)
2566 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2567 error( RtAudioError::WARNING );
2570 if ( stream_.bufferSize != nframes ) {
// NOTE(review): same wrong "RtApiCore" prefix as above.
2571 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2572 error( RtAudioError::WARNING );
2576 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2577 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2579 // Check if we were draining the stream and signal is finished.
2580 if ( handle->drainCounter > 3 ) {
2581 ThreadHandle threadId;
2583 stream_.state = STREAM_STOPPING;
2584 if ( handle->internalDrain == true )
// Callback-initiated stop: spawn a thread so stopStream() can deactivate.
2585 pthread_create( &threadId, NULL, jackStopStream, info );
// User-initiated stop: wake the thread blocked in stopStream().
2587 pthread_cond_signal( &handle->condition );
2591 // Invoke user callback first, to get fresh output data.
2592 if ( handle->drainCounter == 0 ) {
2593 RtAudioCallback callback = (RtAudioCallback) info->callback;
2594 double streamTime = getStreamTime();
2595 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags set by jackXrun().
2596 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2597 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2598 handle->xrun[0] = false;
2600 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2601 status |= RTAUDIO_INPUT_OVERFLOW;
2602 handle->xrun[1] = false;
2604 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2605 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = stop after draining output.
2606 if ( cbReturnValue == 2 ) {
2607 stream_.state = STREAM_STOPPING;
2608 handle->drainCounter = 2;
2610 pthread_create( &id, NULL, jackStopStream, info );
2613 else if ( cbReturnValue == 1 ) {
2614 handle->drainCounter = 1;
2615 handle->internalDrain = true;
2619 jack_default_audio_sample_t *jackbuffer;
2620 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2621 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2623 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2627 memset( jackbuffer, 0, bufferBytes );
2631 else if ( stream_.doConvertBuffer[0] ) {
2633 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2635 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2636 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2637 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2640 else { // no buffer conversion
2641 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2642 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2643 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2648 // Don't bother draining input
2649 if ( handle->drainCounter ) {
2650 handle->drainCounter++;
2654 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2656 if ( stream_.doConvertBuffer[1] ) {
2657 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2658 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2659 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2661 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2663 else { // no buffer conversion
2664 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2665 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2666 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
// Advance the stream-time counter by one buffer of frames.
2672 RtApi::tickStreamTime();
2675 //******************** End of __UNIX_JACK__ *********************//
2678 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2680 // The ASIO API is designed around a callback scheme, so this
2681 // implementation is similar to that used for OS-X CoreAudio and Linux
2682 // Jack. The primary constraint with ASIO is that it only allows
2683 // access to a single driver at a time. Thus, it is not possible to
2684 // have more than one simultaneous RtAudio stream.
2686 // This implementation also requires a number of external ASIO files
2687 // and a few global variables. The ASIO callback scheme does not
2688 // allow for the passing of user data, so we must create a global
2689 // pointer to our callbackInfo structure.
2691 // On unix systems, we make use of a pthread condition variable.
2692 // Since there is no equivalent in Windows, I hacked something based
2693 // on information found in
2694 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2696 #include "asiosys.h"
2698 #include "iasiothiscallresolver.h"
2699 #include "asiodrivers.h"
2702 static AsioDrivers drivers;
2703 static ASIOCallbacks asioCallbacks;
2704 static ASIODriverInfo driverInfo;
2705 static CallbackInfo *asioCallbackInfo;
2706 static bool asioXRun;
// AsioHandle members (the `struct AsioHandle {` header line is among the
// lines elided from this listing).
2709 int drainCounter; // Tracks callback counts when draining
2710 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-channel ASIO double-buffer descriptors, allocated at stream open.
2711 ASIOBufferInfo *bufferInfos;
// Default constructor: zero/false everything so cleanup paths are safe.
2715 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2718 // Function declarations (definitions at end of section)
2719 static const char* getAsioErrorString( ASIOError result );
2720 static void sampleRateChanged( ASIOSampleRate sRate );
2721 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM in apartment-threaded mode (required by ASIO),
// clear any current driver, and set up the shared driverInfo structure.
2723 RtApiAsio :: RtApiAsio()
2725 // ASIO cannot run on a multi-threaded apartment. You can call
2726 // CoInitialize beforehand, but it must be for apartment threading
2727 // (in which case, CoInitialize will return S_FALSE here).
2728 coInitialized_ = false;
2729 HRESULT hr = CoInitialize( NULL );
2731 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2732 error( RtAudioError::WARNING );
// Remember that we own the CoInitialize so the destructor can balance it.
2734 coInitialized_ = true;
2736 drivers.removeCurrentDriver();
2737 driverInfo.asioVersion = 2;
2739 // See note in DirectSound implementation about GetDesktopWindow().
2740 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream and balance our CoInitialize call.
2743 RtApiAsio :: ~RtApiAsio()
2745 if ( stream_.state != STREAM_CLOSED ) closeStream();
2746 if ( coInitialized_ ) CoUninitialize();
// Number of installed ASIO drivers, as reported by the driver list.
2749 unsigned int RtApiAsio :: getDeviceCount( void )
2751 return (unsigned int) drivers.asioGetNumDev();
// Probe one ASIO driver: load and initialize it, query channel counts,
// supported sample rates and the native data type, then unload it. Because
// ASIO allows only one loaded driver at a time, results saved before a
// stream was opened (devices_) are returned while a stream is open.
// info.probed stays false on failure; problems are reported as warnings.
2754 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2756 RtAudio::DeviceInfo info;
2757 info.probed = false;
2760 unsigned int nDevices = getDeviceCount();
2761 if ( nDevices == 0 ) {
2762 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2763 error( RtAudioError::INVALID_USE );
2767 if ( device >= nDevices ) {
2768 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2769 error( RtAudioError::INVALID_USE );
2773 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2774 if ( stream_.state != STREAM_CLOSED ) {
2775 if ( device >= devices_.size() ) {
2776 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2777 error( RtAudioError::WARNING );
2780 return devices_[ device ];
2783 char driverName[32];
2784 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2785 if ( result != ASE_OK ) {
2786 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2787 errorText_ = errorStream_.str();
2788 error( RtAudioError::WARNING );
2792 info.name = driverName;
2794 if ( !drivers.loadDriver( driverName ) ) {
2795 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2796 errorText_ = errorStream_.str();
2797 error( RtAudioError::WARNING );
2801 result = ASIOInit( &driverInfo );
2802 if ( result != ASE_OK ) {
2803 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2804 errorText_ = errorStream_.str();
2805 error( RtAudioError::WARNING );
2809 // Determine the device channel information.
2810 long inputChannels, outputChannels;
2811 result = ASIOGetChannels( &inputChannels, &outputChannels );
2812 if ( result != ASE_OK ) {
2813 drivers.removeCurrentDriver();
2814 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2815 errorText_ = errorStream_.str();
2816 error( RtAudioError::WARNING );
2820 info.outputChannels = outputChannels;
2821 info.inputChannels = inputChannels;
2822 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2823 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2825 // Determine the supported sample rates.
2826 info.sampleRates.clear();
2827 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2828 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2829 if ( result == ASE_OK ) {
2830 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate not exceeding 48 kHz.
2832 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2833 info.preferredSampleRate = SAMPLE_RATES[i];
2837 // Determine supported data types ... just check first channel and assume rest are the same.
2838 ASIOChannelInfo channelInfo;
2839 channelInfo.channel = 0;
2840 channelInfo.isInput = true;
2841 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2842 result = ASIOGetChannelInfo( &channelInfo );
2843 if ( result != ASE_OK ) {
2844 drivers.removeCurrentDriver();
2845 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2846 errorText_ = errorStream_.str();
2847 error( RtAudioError::WARNING );
2851 info.nativeFormats = 0;
2852 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2853 info.nativeFormats |= RTAUDIO_SINT16;
2854 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2855 info.nativeFormats |= RTAUDIO_SINT32;
2856 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2857 info.nativeFormats |= RTAUDIO_FLOAT32;
2858 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2859 info.nativeFormats |= RTAUDIO_FLOAT64;
2860 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2861 info.nativeFormats |= RTAUDIO_SINT24;
2863 if ( info.outputChannels > 0 )
2864 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2865 if ( info.inputChannels > 0 )
2866 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver before returning; only one may be loaded at a time.
2869 drivers.removeCurrentDriver();
2873 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2875 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2876 object->callbackEvent( index );
2879 void RtApiAsio :: saveDeviceInfo( void )
2883 unsigned int nDevices = getDeviceCount();
2884 devices_.resize( nDevices );
2885 for ( unsigned int i=0; i<nDevices; i++ )
2886 devices_[i] = getDeviceInfo( i );
2889 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2890 unsigned int firstChannel, unsigned int sampleRate,
2891 RtAudioFormat format, unsigned int *bufferSize,
2892 RtAudio::StreamOptions *options )
2893 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2895 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2897 // For ASIO, a duplex stream MUST use the same driver.
2898 if ( isDuplexInput && stream_.device[0] != device ) {
2899 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2903 char driverName[32];
2904 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2905 if ( result != ASE_OK ) {
2906 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2907 errorText_ = errorStream_.str();
2911 // Only load the driver once for duplex stream.
2912 if ( !isDuplexInput ) {
2913 // The getDeviceInfo() function will not work when a stream is open
2914 // because ASIO does not allow multiple devices to run at the same
2915 // time. Thus, we'll probe the system before opening a stream and
2916 // save the results for use by getDeviceInfo().
2917 this->saveDeviceInfo();
2919 if ( !drivers.loadDriver( driverName ) ) {
2920 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2921 errorText_ = errorStream_.str();
2925 result = ASIOInit( &driverInfo );
2926 if ( result != ASE_OK ) {
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2928 errorText_ = errorStream_.str();
2933 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2934 bool buffersAllocated = false;
2935 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2936 unsigned int nChannels;
2939 // Check the device channel count.
2940 long inputChannels, outputChannels;
2941 result = ASIOGetChannels( &inputChannels, &outputChannels );
2942 if ( result != ASE_OK ) {
2943 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2944 errorText_ = errorStream_.str();
2948 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2949 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2950 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2951 errorText_ = errorStream_.str();
2954 stream_.nDeviceChannels[mode] = channels;
2955 stream_.nUserChannels[mode] = channels;
2956 stream_.channelOffset[mode] = firstChannel;
2958 // Verify the sample rate is supported.
2959 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2960 if ( result != ASE_OK ) {
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2962 errorText_ = errorStream_.str();
2966 // Get the current sample rate
2967 ASIOSampleRate currentRate;
2968 result = ASIOGetSampleRate( ¤tRate );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2971 errorText_ = errorStream_.str();
2975 // Set the sample rate only if necessary
2976 if ( currentRate != sampleRate ) {
2977 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2978 if ( result != ASE_OK ) {
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2980 errorText_ = errorStream_.str();
2985 // Determine the driver data type.
2986 ASIOChannelInfo channelInfo;
2987 channelInfo.channel = 0;
2988 if ( mode == OUTPUT ) channelInfo.isInput = false;
2989 else channelInfo.isInput = true;
2990 result = ASIOGetChannelInfo( &channelInfo );
2991 if ( result != ASE_OK ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2993 errorText_ = errorStream_.str();
2997 // Assuming WINDOWS host is always little-endian.
2998 stream_.doByteSwap[mode] = false;
2999 stream_.userFormat = format;
3000 stream_.deviceFormat[mode] = 0;
3001 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3003 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3005 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3006 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3007 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3009 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3011 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3013 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3014 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3015 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3017 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3018 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3019 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3022 if ( stream_.deviceFormat[mode] == 0 ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3024 errorText_ = errorStream_.str();
3028 // Set the buffer size. For a duplex stream, this will end up
3029 // setting the buffer size based on the input constraints, which
3031 long minSize, maxSize, preferSize, granularity;
3032 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3035 errorText_ = errorStream_.str();
3039 if ( isDuplexInput ) {
3040 // When this is the duplex input (output was opened before), then we have to use the same
3041 // buffersize as the output, because it might use the preferred buffer size, which most
3042 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3043 // So instead of throwing an error, make them equal. The caller uses the reference
3044 // to the "bufferSize" param as usual to set up processing buffers.
3046 *bufferSize = stream_.bufferSize;
3049 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3050 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3051 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3052 else if ( granularity == -1 ) {
3053 // Make sure bufferSize is a power of two.
3054 int log2_of_min_size = 0;
3055 int log2_of_max_size = 0;
3057 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3058 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3059 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3062 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3063 int min_delta_num = log2_of_min_size;
3065 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3066 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3067 if (current_delta < min_delta) {
3068 min_delta = current_delta;
3073 *bufferSize = ( (unsigned int)1 << min_delta_num );
3074 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3075 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3077 else if ( granularity != 0 ) {
3078 // Set to an even multiple of granularity, rounding up.
3079 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3084 // we don't use it anymore, see above!
3085 // Just left it here for the case...
3086 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3087 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3092 stream_.bufferSize = *bufferSize;
3093 stream_.nBuffers = 2;
3095 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3096 else stream_.userInterleaved = true;
3098 // ASIO always uses non-interleaved buffers.
3099 stream_.deviceInterleaved[mode] = false;
3101 // Allocate, if necessary, our AsioHandle structure for the stream.
3102 if ( handle == 0 ) {
3104 handle = new AsioHandle;
3106 catch ( std::bad_alloc& ) {
3107 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3110 handle->bufferInfos = 0;
3112 // Create a manual-reset event.
3113 handle->condition = CreateEvent( NULL, // no security
3114 TRUE, // manual-reset
3115 FALSE, // non-signaled initially
3117 stream_.apiHandle = (void *) handle;
3120 // Create the ASIO internal buffers. Since RtAudio sets up input
3121 // and output separately, we'll have to dispose of previously
3122 // created output buffers for a duplex stream.
3123 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3124 ASIODisposeBuffers();
3125 if ( handle->bufferInfos ) free( handle->bufferInfos );
3128 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3130 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3131 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3132 if ( handle->bufferInfos == NULL ) {
3133 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3134 errorText_ = errorStream_.str();
3138 ASIOBufferInfo *infos;
3139 infos = handle->bufferInfos;
3140 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3141 infos->isInput = ASIOFalse;
3142 infos->channelNum = i + stream_.channelOffset[0];
3143 infos->buffers[0] = infos->buffers[1] = 0;
3145 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3146 infos->isInput = ASIOTrue;
3147 infos->channelNum = i + stream_.channelOffset[1];
3148 infos->buffers[0] = infos->buffers[1] = 0;
3151 // prepare for callbacks
3152 stream_.sampleRate = sampleRate;
3153 stream_.device[mode] = device;
3154 stream_.mode = isDuplexInput ? DUPLEX : mode;
3156 // store this class instance before registering callbacks, that are going to use it
3157 asioCallbackInfo = &stream_.callbackInfo;
3158 stream_.callbackInfo.object = (void *) this;
3160 // Set up the ASIO callback structure and create the ASIO data buffers.
3161 asioCallbacks.bufferSwitch = &bufferSwitch;
3162 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3163 asioCallbacks.asioMessage = &asioMessages;
3164 asioCallbacks.bufferSwitchTimeInfo = NULL;
3165 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3166 if ( result != ASE_OK ) {
3167 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3168 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3169 // In that case, let's be naïve and try that instead.
3170 *bufferSize = preferSize;
3171 stream_.bufferSize = *bufferSize;
3172 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3175 if ( result != ASE_OK ) {
3176 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3177 errorText_ = errorStream_.str();
3180 buffersAllocated = true;
3181 stream_.state = STREAM_STOPPED;
3183 // Set flags for buffer conversion.
3184 stream_.doConvertBuffer[mode] = false;
3185 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3186 stream_.doConvertBuffer[mode] = true;
3187 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3188 stream_.nUserChannels[mode] > 1 )
3189 stream_.doConvertBuffer[mode] = true;
3191 // Allocate necessary internal buffers
3192 unsigned long bufferBytes;
3193 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3194 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3195 if ( stream_.userBuffer[mode] == NULL ) {
3196 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3200 if ( stream_.doConvertBuffer[mode] ) {
3202 bool makeBuffer = true;
3203 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3204 if ( isDuplexInput && stream_.deviceBuffer ) {
3205 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3206 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3210 bufferBytes *= *bufferSize;
3211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3213 if ( stream_.deviceBuffer == NULL ) {
3214 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3220 // Determine device latencies
3221 long inputLatency, outputLatency;
3222 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3223 if ( result != ASE_OK ) {
3224 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3225 errorText_ = errorStream_.str();
3226 error( RtAudioError::WARNING); // warn but don't fail
3229 stream_.latency[0] = outputLatency;
3230 stream_.latency[1] = inputLatency;
3233 // Setup the buffer conversion information structure. We don't use
3234 // buffers to do channel offsets, so we override that parameter
3236 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3241 if ( !isDuplexInput ) {
3242 // the cleanup for error in the duplex input, is done by RtApi::openStream
3243 // So we clean up for single channel only
3245 if ( buffersAllocated )
3246 ASIODisposeBuffers();
3248 drivers.removeCurrentDriver();
3251 CloseHandle( handle->condition );
3252 if ( handle->bufferInfos )
3253 free( handle->bufferInfos );
3256 stream_.apiHandle = 0;
3260 if ( stream_.userBuffer[mode] ) {
3261 free( stream_.userBuffer[mode] );
3262 stream_.userBuffer[mode] = 0;
3265 if ( stream_.deviceBuffer ) {
3266 free( stream_.deviceBuffer );
3267 stream_.deviceBuffer = 0;
3272 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3274 void RtApiAsio :: closeStream()
3276 if ( stream_.state == STREAM_CLOSED ) {
3277 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3278 error( RtAudioError::WARNING );
3282 if ( stream_.state == STREAM_RUNNING ) {
3283 stream_.state = STREAM_STOPPED;
3286 ASIODisposeBuffers();
3287 drivers.removeCurrentDriver();
3289 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3291 CloseHandle( handle->condition );
3292 if ( handle->bufferInfos )
3293 free( handle->bufferInfos );
3295 stream_.apiHandle = 0;
3298 for ( int i=0; i<2; i++ ) {
3299 if ( stream_.userBuffer[i] ) {
3300 free( stream_.userBuffer[i] );
3301 stream_.userBuffer[i] = 0;
3305 if ( stream_.deviceBuffer ) {
3306 free( stream_.deviceBuffer );
3307 stream_.deviceBuffer = 0;
3310 stream_.mode = UNINITIALIZED;
3311 stream_.state = STREAM_CLOSED;
// Reset to false by startStream() each time the stream is (re)started.
// NOTE(review): appears to guard against issuing a redundant stop from the
// callback/stop-thread path — confirm against the other uses of this flag
// elsewhere in the file (not visible in this chunk).
bool stopThreadCalled = false;
3316 void RtApiAsio :: startStream()
3319 if ( stream_.state == STREAM_RUNNING ) {
3320 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3321 error( RtAudioError::WARNING );
3325 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3326 ASIOError result = ASIOStart();
3327 if ( result != ASE_OK ) {
3328 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3329 errorText_ = errorStream_.str();
3333 handle->drainCounter = 0;
3334 handle->internalDrain = false;
3335 ResetEvent( handle->condition );
3336 stream_.state = STREAM_RUNNING;
3340 stopThreadCalled = false;
3342 if ( result == ASE_OK ) return;
3343 error( RtAudioError::SYSTEM_ERROR );
3346 void RtApiAsio :: stopStream()
3349 if ( stream_.state == STREAM_STOPPED ) {
3350 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3351 error( RtAudioError::WARNING );
3355 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3357 if ( handle->drainCounter == 0 ) {
3358 handle->drainCounter = 2;
3359 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3363 stream_.state = STREAM_STOPPED;
3365 ASIOError result = ASIOStop();
3366 if ( result != ASE_OK ) {
3367 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3368 errorText_ = errorStream_.str();
3371 if ( result == ASE_OK ) return;
3372 error( RtAudioError::SYSTEM_ERROR );
3375 void RtApiAsio :: abortStream()
3378 if ( stream_.state == STREAM_STOPPED ) {
3379 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3380 error( RtAudioError::WARNING );
3384 // The following lines were commented-out because some behavior was
3385 // noted where the device buffers need to be zeroed to avoid
3386 // continuing sound, even when the device buffers are completely
3387 // disposed. So now, calling abort is the same as calling stop.
3388 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3389 // handle->drainCounter = 2;
3393 // This function will be called by a spawned thread when the user
3394 // callback function signals that the stream should be stopped or
3395 // aborted. It is necessary to handle it this way because the
3396 // callbackEvent() function must return before the ASIOStop()
3397 // function will return.
3398 static unsigned __stdcall asioStopStream( void *ptr )
3400 CallbackInfo *info = (CallbackInfo *) ptr;
3401 RtApiAsio *object = (RtApiAsio *) info->object;
3403 object->stopStream();
3408 bool RtApiAsio :: callbackEvent( long bufferIndex )
3410 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3411 if ( stream_.state == STREAM_CLOSED ) {
3412 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3413 error( RtAudioError::WARNING );
3417 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3418 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3420 // Check if we were draining the stream and signal if finished.
3421 if ( handle->drainCounter > 3 ) {
3423 stream_.state = STREAM_STOPPING;
3424 if ( handle->internalDrain == false )
3425 SetEvent( handle->condition );
3426 else { // spawn a thread to stop the stream
3428 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3429 &stream_.callbackInfo, 0, &threadId );
3434 // Invoke user callback to get fresh output data UNLESS we are
3436 if ( handle->drainCounter == 0 ) {
3437 RtAudioCallback callback = (RtAudioCallback) info->callback;
3438 double streamTime = getStreamTime();
3439 RtAudioStreamStatus status = 0;
3440 if ( stream_.mode != INPUT && asioXRun == true ) {
3441 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3444 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3445 status |= RTAUDIO_INPUT_OVERFLOW;
3448 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3449 stream_.bufferSize, streamTime, status, info->userData );
3450 if ( cbReturnValue == 2 ) {
3451 stream_.state = STREAM_STOPPING;
3452 handle->drainCounter = 2;
3454 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3455 &stream_.callbackInfo, 0, &threadId );
3458 else if ( cbReturnValue == 1 ) {
3459 handle->drainCounter = 1;
3460 handle->internalDrain = true;
3464 unsigned int nChannels, bufferBytes, i, j;
3465 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3466 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3468 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3470 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3472 for ( i=0, j=0; i<nChannels; i++ ) {
3473 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3474 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3478 else if ( stream_.doConvertBuffer[0] ) {
3480 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3481 if ( stream_.doByteSwap[0] )
3482 byteSwapBuffer( stream_.deviceBuffer,
3483 stream_.bufferSize * stream_.nDeviceChannels[0],
3484 stream_.deviceFormat[0] );
3486 for ( i=0, j=0; i<nChannels; i++ ) {
3487 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3488 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3489 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3495 if ( stream_.doByteSwap[0] )
3496 byteSwapBuffer( stream_.userBuffer[0],
3497 stream_.bufferSize * stream_.nUserChannels[0],
3498 stream_.userFormat );
3500 for ( i=0, j=0; i<nChannels; i++ ) {
3501 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3502 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3503 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3509 // Don't bother draining input
3510 if ( handle->drainCounter ) {
3511 handle->drainCounter++;
3515 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3517 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3519 if (stream_.doConvertBuffer[1]) {
3521 // Always interleave ASIO input data.
3522 for ( i=0, j=0; i<nChannels; i++ ) {
3523 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3524 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3525 handle->bufferInfos[i].buffers[bufferIndex],
3529 if ( stream_.doByteSwap[1] )
3530 byteSwapBuffer( stream_.deviceBuffer,
3531 stream_.bufferSize * stream_.nDeviceChannels[1],
3532 stream_.deviceFormat[1] );
3533 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3537 for ( i=0, j=0; i<nChannels; i++ ) {
3538 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3539 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3540 handle->bufferInfos[i].buffers[bufferIndex],
3545 if ( stream_.doByteSwap[1] )
3546 byteSwapBuffer( stream_.userBuffer[1],
3547 stream_.bufferSize * stream_.nUserChannels[1],
3548 stream_.userFormat );
3553 // The following call was suggested by Malte Clasen. While the API
3554 // documentation indicates it should not be required, some device
3555 // drivers apparently do not function correctly without it.
3558 RtApi::tickStreamTime();
3562 static void sampleRateChanged( ASIOSampleRate sRate )
3564 // The ASIO documentation says that this usually only happens during
3565 // external sync. Audio processing is not stopped by the driver,
3566 // actual sample rate might not have even changed, maybe only the
3567 // sample rate status of an AES/EBU or S/PDIF digital input at the
3570 RtApi *object = (RtApi *) asioCallbackInfo->object;
3572 object->stopStream();
3574 catch ( RtAudioError &exception ) {
3575 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3579 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3582 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3586 switch( selector ) {
3587 case kAsioSelectorSupported:
3588 if ( value == kAsioResetRequest
3589 || value == kAsioEngineVersion
3590 || value == kAsioResyncRequest
3591 || value == kAsioLatenciesChanged
3592 // The following three were added for ASIO 2.0, you don't
3593 // necessarily have to support them.
3594 || value == kAsioSupportsTimeInfo
3595 || value == kAsioSupportsTimeCode
3596 || value == kAsioSupportsInputMonitor)
3599 case kAsioResetRequest:
3600 // Defer the task and perform the reset of the driver during the
3601 // next "safe" situation. You cannot reset the driver right now,
3602 // as this code is called from the driver. Reset the driver is
3603 // done by completely destruct is. I.e. ASIOStop(),
3604 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3606 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3609 case kAsioResyncRequest:
3610 // This informs the application that the driver encountered some
3611 // non-fatal data loss. It is used for synchronization purposes
3612 // of different media. Added mainly to work around the Win16Mutex
3613 // problems in Windows 95/98 with the Windows Multimedia system,
3614 // which could lose data because the Mutex was held too long by
3615 // another thread. However a driver can issue it in other
3617 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3621 case kAsioLatenciesChanged:
3622 // This will inform the host application that the drivers were
3623 // latencies changed. Beware, it this does not mean that the
3624 // buffer sizes have changed! You might need to update internal
3626 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3629 case kAsioEngineVersion:
3630 // Return the supported ASIO version of the host application. If
3631 // a host application does not implement this selector, ASIO 1.0
3632 // is assumed by the driver.
3635 case kAsioSupportsTimeInfo:
3636 // Informs the driver whether the
3637 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3638 // For compatibility with ASIO 1.0 drivers the host application
3639 // should always support the "old" bufferSwitch method, too.
3642 case kAsioSupportsTimeCode:
3643 // Informs the driver whether application is interested in time
3644 // code info. If an application does not need to know about time
3645 // code, the driver has less work to do.
3652 static const char* getAsioErrorString( ASIOError result )
3660 static const Messages m[] =
3662 { ASE_NotPresent, "Hardware input or output is not present or available." },
3663 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3664 { ASE_InvalidParameter, "Invalid input parameter." },
3665 { ASE_InvalidMode, "Invalid mode." },
3666 { ASE_SPNotAdvancing, "Sample position not advancing." },
3667 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3668 { ASE_NoMemory, "Not enough memory to complete the request." }
3671 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3672 if ( m[i].value == result ) return m[i].message;
3674 return "Unknown error.";
3677 //******************** End of __WINDOWS_ASIO__ *********************//
3681 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3683 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3684 // - Introduces support for the Windows WASAPI API
3685 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3686 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3687 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3692 #include <audioclient.h>
3694 #include <mmdeviceapi.h>
3695 #include <functiondiscoverykeys_devpkey.h>
3698 //=============================================================================
// Release a COM interface pointer and null it out; safe no-op when the
// pointer is already NULL.  (Restored: this copy had lost the `if` guard
// and the nulling continuation lines, leaving an unconditional
// `->Release()` that would crash on a NULL pointer.)
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
// Function-pointer type matching AvSetMmThreadCharacteristicsW (avrt.dll).
// NOTE(review): presumably resolved at runtime (GetProcAddress) so builds
// don't hard-link against avrt.lib — confirm against the loading code
// elsewhere in this file.
typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3709 //-----------------------------------------------------------------------------
3711 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3712 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3713 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3714 // provide intermediate storage for read / write synchronization.
3728 // sets the length of the internal ring buffer
3729 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3732 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3734 bufferSize_ = bufferSize;
3739 // attempt to push a buffer into the ring buffer at the current "in" index
3740 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3742 if ( !buffer || // incoming buffer is NULL
3743 bufferSize == 0 || // incoming buffer has no data
3744 bufferSize > bufferSize_ ) // incoming buffer too large
3749 unsigned int relOutIndex = outIndex_;
3750 unsigned int inIndexEnd = inIndex_ + bufferSize;
3751 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3752 relOutIndex += bufferSize_;
3755 // "in" index can end on the "out" index but cannot begin at it
3756 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3757 return false; // not enough space between "in" index and "out" index
3760 // copy buffer from external to internal
3761 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3762 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3763 int fromInSize = bufferSize - fromZeroSize;
3768 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3769 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3771 case RTAUDIO_SINT16:
3772 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3773 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3775 case RTAUDIO_SINT24:
3776 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3777 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3779 case RTAUDIO_SINT32:
3780 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3781 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3783 case RTAUDIO_FLOAT32:
3784 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3785 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3787 case RTAUDIO_FLOAT64:
3788 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3789 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3793 // update "in" index
3794 inIndex_ += bufferSize;
3795 inIndex_ %= bufferSize_;
3800 // attempt to pull a buffer from the ring buffer from the current "out" index
3801 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3803 if ( !buffer || // incoming buffer is NULL
3804 bufferSize == 0 || // incoming buffer has no data
3805 bufferSize > bufferSize_ ) // incoming buffer too large
3810 unsigned int relInIndex = inIndex_;
3811 unsigned int outIndexEnd = outIndex_ + bufferSize;
3812 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3813 relInIndex += bufferSize_;
3816 // "out" index can begin at and end on the "in" index
3817 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3818 return false; // not enough space between "out" index and "in" index
3821 // copy buffer from internal to external
3822 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3823 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3824 int fromOutSize = bufferSize - fromZeroSize;
3829 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3830 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3832 case RTAUDIO_SINT16:
3833 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3834 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3836 case RTAUDIO_SINT24:
3837 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3838 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3840 case RTAUDIO_SINT32:
3841 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3842 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3844 case RTAUDIO_FLOAT32:
3845 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3846 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3848 case RTAUDIO_FLOAT64:
3849 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3850 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3854 // update "out" index
3855 outIndex_ += bufferSize;
3856 outIndex_ %= bufferSize_;
// Ring-buffer bookkeeping.  All three values are expressed in samples
// (elements of the stream's sample format), not bytes: pushBuffer() and
// pullBuffer() above index typed views of the storage and wrap the indices
// with "% bufferSize_".
// total capacity of the ring, in samples
3863 unsigned int bufferSize_;
// next write position ("in" index); advanced by pushBuffer()
3864 unsigned int inIndex_;
// next read position ("out" index); advanced by pullBuffer()
3865 unsigned int outIndex_;
3868 //-----------------------------------------------------------------------------
3870 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI state stored in stream_.apiHandle.  The COM interface
// pointers are released with SAFE_RELEASE and the event handles closed with
// CloseHandle in RtApiWasapi::closeStream().
// NOTE(review): the struct header, the renderEvent declaration and the
// constructor signature are not visible in this excerpt — confirm against
// the full file.
3873 IAudioClient* captureAudioClient;
3874 IAudioClient* renderAudioClient;
3875 IAudioCaptureClient* captureClient;
3876 IAudioRenderClient* renderClient;
3877 HANDLE captureEvent;
// constructor initializer list: every handle starts out NULL until the
// stream is probed/started.
3881 : captureAudioClient( NULL ),
3882 renderAudioClient( NULL ),
3883 captureClient( NULL ),
3884 renderClient( NULL ),
3885 captureEvent( NULL ),
3886 renderEvent( NULL ) {}
3889 //=============================================================================
3891 RtApiWasapi::RtApiWasapi()
3892 : coInitialized_( false ), deviceEnumerator_( NULL )
3894 // WASAPI can run either apartment or multi-threaded
3895 HRESULT hr = CoInitialize( NULL );
3896 if ( !FAILED( hr ) )
3897 coInitialized_ = true;
3899 // Instantiate device enumerator
3900 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3901 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3902 ( void** ) &deviceEnumerator_ );
3904 if ( FAILED( hr ) ) {
3905 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3906 error( RtAudioError::DRIVER_ERROR );
3910 //-----------------------------------------------------------------------------
3912 RtApiWasapi::~RtApiWasapi()
3914 if ( stream_.state != STREAM_CLOSED )
3917 SAFE_RELEASE( deviceEnumerator_ );
3919 // If this object previously called CoInitialize()
3920 if ( coInitialized_ )
3924 //=============================================================================
3926 unsigned int RtApiWasapi::getDeviceCount( void )
3928 unsigned int captureDeviceCount = 0;
3929 unsigned int renderDeviceCount = 0;
3931 IMMDeviceCollection* captureDevices = NULL;
3932 IMMDeviceCollection* renderDevices = NULL;
3934 // Count capture devices
3936 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3937 if ( FAILED( hr ) ) {
3938 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
3942 hr = captureDevices->GetCount( &captureDeviceCount );
3943 if ( FAILED( hr ) ) {
3944 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
3948 // Count render devices
3949 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
3950 if ( FAILED( hr ) ) {
3951 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
3955 hr = renderDevices->GetCount( &renderDeviceCount );
3956 if ( FAILED( hr ) ) {
3957 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
3962 // release all references
3963 SAFE_RELEASE( captureDevices );
3964 SAFE_RELEASE( renderDevices );
3966 if ( errorText_.empty() )
3967 return captureDeviceCount + renderDeviceCount;
3969 error( RtAudioError::DRIVER_ERROR );
3973 //-----------------------------------------------------------------------------
3975 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
3977 RtAudio::DeviceInfo info;
3978 unsigned int captureDeviceCount = 0;
3979 unsigned int renderDeviceCount = 0;
3980 std::string defaultDeviceName;
3981 bool isCaptureDevice = false;
3983 PROPVARIANT deviceNameProp;
3984 PROPVARIANT defaultDeviceNameProp;
3986 IMMDeviceCollection* captureDevices = NULL;
3987 IMMDeviceCollection* renderDevices = NULL;
3988 IMMDevice* devicePtr = NULL;
3989 IMMDevice* defaultDevicePtr = NULL;
3990 IAudioClient* audioClient = NULL;
3991 IPropertyStore* devicePropStore = NULL;
3992 IPropertyStore* defaultDevicePropStore = NULL;
3994 WAVEFORMATEX* deviceFormat = NULL;
3995 WAVEFORMATEX* closestMatchFormat = NULL;
3998 info.probed = false;
4000 // Count capture devices
4002 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4003 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4004 if ( FAILED( hr ) ) {
4005 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4009 hr = captureDevices->GetCount( &captureDeviceCount );
4010 if ( FAILED( hr ) ) {
4011 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4015 // Count render devices
4016 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4017 if ( FAILED( hr ) ) {
4018 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4022 hr = renderDevices->GetCount( &renderDeviceCount );
4023 if ( FAILED( hr ) ) {
4024 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4028 // validate device index
4029 if ( device >= captureDeviceCount + renderDeviceCount ) {
4030 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4031 errorType = RtAudioError::INVALID_USE;
4035 // determine whether index falls within capture or render devices
4036 if ( device >= renderDeviceCount ) {
4037 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4038 if ( FAILED( hr ) ) {
4039 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4042 isCaptureDevice = true;
4045 hr = renderDevices->Item( device, &devicePtr );
4046 if ( FAILED( hr ) ) {
4047 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4050 isCaptureDevice = false;
4053 // get default device name
4054 if ( isCaptureDevice ) {
4055 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4056 if ( FAILED( hr ) ) {
4057 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4062 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4063 if ( FAILED( hr ) ) {
4064 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4069 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4070 if ( FAILED( hr ) ) {
4071 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4074 PropVariantInit( &defaultDeviceNameProp );
4076 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4077 if ( FAILED( hr ) ) {
4078 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4082 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4085 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4086 if ( FAILED( hr ) ) {
4087 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4091 PropVariantInit( &deviceNameProp );
4093 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4094 if ( FAILED( hr ) ) {
4095 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4099 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4102 if ( isCaptureDevice ) {
4103 info.isDefaultInput = info.name == defaultDeviceName;
4104 info.isDefaultOutput = false;
4107 info.isDefaultInput = false;
4108 info.isDefaultOutput = info.name == defaultDeviceName;
4112 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4113 if ( FAILED( hr ) ) {
4114 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4118 hr = audioClient->GetMixFormat( &deviceFormat );
4119 if ( FAILED( hr ) ) {
4120 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4124 if ( isCaptureDevice ) {
4125 info.inputChannels = deviceFormat->nChannels;
4126 info.outputChannels = 0;
4127 info.duplexChannels = 0;
4130 info.inputChannels = 0;
4131 info.outputChannels = deviceFormat->nChannels;
4132 info.duplexChannels = 0;
4135 // sample rates (WASAPI only supports the one native sample rate)
4136 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4138 info.sampleRates.clear();
4139 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
4142 info.nativeFormats = 0;
4144 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4145 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4146 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4148 if ( deviceFormat->wBitsPerSample == 32 ) {
4149 info.nativeFormats |= RTAUDIO_FLOAT32;
4151 else if ( deviceFormat->wBitsPerSample == 64 ) {
4152 info.nativeFormats |= RTAUDIO_FLOAT64;
4155 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4156 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4157 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4159 if ( deviceFormat->wBitsPerSample == 8 ) {
4160 info.nativeFormats |= RTAUDIO_SINT8;
4162 else if ( deviceFormat->wBitsPerSample == 16 ) {
4163 info.nativeFormats |= RTAUDIO_SINT16;
4165 else if ( deviceFormat->wBitsPerSample == 24 ) {
4166 info.nativeFormats |= RTAUDIO_SINT24;
4168 else if ( deviceFormat->wBitsPerSample == 32 ) {
4169 info.nativeFormats |= RTAUDIO_SINT32;
4177 // release all references
4178 PropVariantClear( &deviceNameProp );
4179 PropVariantClear( &defaultDeviceNameProp );
4181 SAFE_RELEASE( captureDevices );
4182 SAFE_RELEASE( renderDevices );
4183 SAFE_RELEASE( devicePtr );
4184 SAFE_RELEASE( defaultDevicePtr );
4185 SAFE_RELEASE( audioClient );
4186 SAFE_RELEASE( devicePropStore );
4187 SAFE_RELEASE( defaultDevicePropStore );
4189 CoTaskMemFree( deviceFormat );
4190 CoTaskMemFree( closestMatchFormat );
4192 if ( !errorText_.empty() )
4197 //-----------------------------------------------------------------------------
4199 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4201 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4202 if ( getDeviceInfo( i ).isDefaultOutput ) {
4210 //-----------------------------------------------------------------------------
4212 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4214 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4215 if ( getDeviceInfo( i ).isDefaultInput ) {
4223 //-----------------------------------------------------------------------------
4225 void RtApiWasapi::closeStream( void )
4227 if ( stream_.state == STREAM_CLOSED ) {
4228 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4229 error( RtAudioError::WARNING );
4233 if ( stream_.state != STREAM_STOPPED )
4236 // clean up stream memory
4237 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4238 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4240 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4241 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4243 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4244 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4246 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4247 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4249 delete ( WasapiHandle* ) stream_.apiHandle;
4250 stream_.apiHandle = NULL;
4252 for ( int i = 0; i < 2; i++ ) {
4253 if ( stream_.userBuffer[i] ) {
4254 free( stream_.userBuffer[i] );
4255 stream_.userBuffer[i] = 0;
4259 if ( stream_.deviceBuffer ) {
4260 free( stream_.deviceBuffer );
4261 stream_.deviceBuffer = 0;
4264 // update stream state
4265 stream_.state = STREAM_CLOSED;
4268 //-----------------------------------------------------------------------------
4270 void RtApiWasapi::startStream( void )
4274 if ( stream_.state == STREAM_RUNNING ) {
4275 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4276 error( RtAudioError::WARNING );
4280 // update stream state
4281 stream_.state = STREAM_RUNNING;
4283 // create WASAPI stream thread
4284 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4286 if ( !stream_.callbackInfo.thread ) {
4287 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4288 error( RtAudioError::THREAD_ERROR );
4291 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4292 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4296 //-----------------------------------------------------------------------------
4298 void RtApiWasapi::stopStream( void )
4302 if ( stream_.state == STREAM_STOPPED ) {
4303 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4304 error( RtAudioError::WARNING );
4308 // inform stream thread by setting stream state to STREAM_STOPPING
4309 stream_.state = STREAM_STOPPING;
4311 // wait until stream thread is stopped
4312 while( stream_.state != STREAM_STOPPED ) {
4316 // Wait for the last buffer to play before stopping.
4317 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4319 // stop capture client if applicable
4320 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4321 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4322 if ( FAILED( hr ) ) {
4323 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4324 error( RtAudioError::DRIVER_ERROR );
4329 // stop render client if applicable
4330 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4331 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4332 if ( FAILED( hr ) ) {
4333 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4334 error( RtAudioError::DRIVER_ERROR );
4339 // close thread handle
4340 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4341 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4342 error( RtAudioError::THREAD_ERROR );
4346 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4349 //-----------------------------------------------------------------------------
4351 void RtApiWasapi::abortStream( void )
4355 if ( stream_.state == STREAM_STOPPED ) {
4356 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4357 error( RtAudioError::WARNING );
4361 // inform stream thread by setting stream state to STREAM_STOPPING
4362 stream_.state = STREAM_STOPPING;
4364 // wait until stream thread is stopped
4365 while ( stream_.state != STREAM_STOPPED ) {
4369 // stop capture client if applicable
4370 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4371 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4372 if ( FAILED( hr ) ) {
4373 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4374 error( RtAudioError::DRIVER_ERROR );
4379 // stop render client if applicable
4380 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4381 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4382 if ( FAILED( hr ) ) {
4383 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4384 error( RtAudioError::DRIVER_ERROR );
4389 // close thread handle
4390 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4391 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4392 error( RtAudioError::THREAD_ERROR );
4396 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4399 //-----------------------------------------------------------------------------
4401 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4402 unsigned int firstChannel, unsigned int sampleRate,
4403 RtAudioFormat format, unsigned int* bufferSize,
4404 RtAudio::StreamOptions* options )
4406 bool methodResult = FAILURE;
4407 unsigned int captureDeviceCount = 0;
4408 unsigned int renderDeviceCount = 0;
4410 IMMDeviceCollection* captureDevices = NULL;
4411 IMMDeviceCollection* renderDevices = NULL;
4412 IMMDevice* devicePtr = NULL;
4413 WAVEFORMATEX* deviceFormat = NULL;
4414 unsigned int bufferBytes;
4415 stream_.state = STREAM_STOPPED;
4416 RtAudio::DeviceInfo deviceInfo;
4418 // create API Handle if not already created
4419 if ( !stream_.apiHandle )
4420 stream_.apiHandle = ( void* ) new WasapiHandle();
4422 // Count capture devices
4424 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4425 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4426 if ( FAILED( hr ) ) {
4427 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4431 hr = captureDevices->GetCount( &captureDeviceCount );
4432 if ( FAILED( hr ) ) {
4433 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4437 // Count render devices
4438 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4439 if ( FAILED( hr ) ) {
4440 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4444 hr = renderDevices->GetCount( &renderDeviceCount );
4445 if ( FAILED( hr ) ) {
4446 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4450 // validate device index
4451 if ( device >= captureDeviceCount + renderDeviceCount ) {
4452 errorType = RtAudioError::INVALID_USE;
4453 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4457 deviceInfo = getDeviceInfo( device );
4459 // validate sample rate
4460 if ( sampleRate != deviceInfo.preferredSampleRate )
4462 errorType = RtAudioError::INVALID_USE;
4463 std::stringstream ss;
4464 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4465 << "Hz sample rate not supported. This device only supports "
4466 << deviceInfo.preferredSampleRate << "Hz.";
4467 errorText_ = ss.str();
4471 // determine whether index falls within capture or render devices
4472 if ( device >= renderDeviceCount ) {
4473 if ( mode != INPUT ) {
4474 errorType = RtAudioError::INVALID_USE;
4475 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4479 // retrieve captureAudioClient from devicePtr
4480 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4482 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4483 if ( FAILED( hr ) ) {
4484 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4488 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4489 NULL, ( void** ) &captureAudioClient );
4490 if ( FAILED( hr ) ) {
4491 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4495 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4496 if ( FAILED( hr ) ) {
4497 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4501 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4502 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4505 if ( mode != OUTPUT ) {
4506 errorType = RtAudioError::INVALID_USE;
4507 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4511 // retrieve renderAudioClient from devicePtr
4512 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4514 hr = renderDevices->Item( device, &devicePtr );
4515 if ( FAILED( hr ) ) {
4516 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4520 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4521 NULL, ( void** ) &renderAudioClient );
4522 if ( FAILED( hr ) ) {
4523 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4527 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4528 if ( FAILED( hr ) ) {
4529 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4533 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4534 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4538 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4539 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4540 stream_.mode = DUPLEX;
4543 stream_.mode = mode;
4546 stream_.device[mode] = device;
4547 stream_.doByteSwap[mode] = false;
4548 stream_.sampleRate = sampleRate;
4549 stream_.bufferSize = *bufferSize;
4550 stream_.nBuffers = 1;
4551 stream_.nUserChannels[mode] = channels;
4552 stream_.channelOffset[mode] = firstChannel;
4553 stream_.userFormat = format;
4554 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4556 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4557 stream_.userInterleaved = false;
4559 stream_.userInterleaved = true;
4560 stream_.deviceInterleaved[mode] = true;
4562 // Set flags for buffer conversion.
4563 stream_.doConvertBuffer[mode] = false;
4564 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4565 stream_.nUserChannels != stream_.nDeviceChannels )
4566 stream_.doConvertBuffer[mode] = true;
4567 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4568 stream_.nUserChannels[mode] > 1 )
4569 stream_.doConvertBuffer[mode] = true;
4571 if ( stream_.doConvertBuffer[mode] )
4572 setConvertInfo( mode, 0 );
4574 // Allocate necessary internal buffers
4575 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4577 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4578 if ( !stream_.userBuffer[mode] ) {
4579 errorType = RtAudioError::MEMORY_ERROR;
4580 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4584 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4585 stream_.callbackInfo.priority = 15;
4587 stream_.callbackInfo.priority = 0;
4589 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4590 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4592 methodResult = SUCCESS;
4596 SAFE_RELEASE( captureDevices );
4597 SAFE_RELEASE( renderDevices );
4598 SAFE_RELEASE( devicePtr );
4599 CoTaskMemFree( deviceFormat );
4601 // if method failed, close the stream
4602 if ( methodResult == FAILURE )
4605 if ( !errorText_.empty() )
4607 return methodResult;
4610 //=============================================================================
4612 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4615 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4620 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4623 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4628 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4631 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4636 //-----------------------------------------------------------------------------
4638 void RtApiWasapi::wasapiThread()
4640 // as this is a new thread, we must CoInitialize it
4641 CoInitialize( NULL );
4645 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4646 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4647 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4648 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4649 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4650 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4652 WAVEFORMATEX* captureFormat = NULL;
4653 WAVEFORMATEX* renderFormat = NULL;
4654 WasapiBuffer captureBuffer;
4655 WasapiBuffer renderBuffer;
4657 // declare local stream variables
4658 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4659 BYTE* streamBuffer = NULL;
4660 unsigned long captureFlags = 0;
4661 unsigned int bufferFrameCount = 0;
4662 unsigned int numFramesPadding = 0;
4663 bool callbackPushed = false;
4664 bool callbackPulled = false;
4665 bool callbackStopped = false;
4666 int callbackResult = 0;
4668 unsigned int deviceBuffSize = 0;
4671 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4673 // Attempt to assign "Pro Audio" characteristic to thread
4674 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4676 DWORD taskIndex = 0;
4677 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4678 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4679 FreeLibrary( AvrtDll );
4682 // start capture stream if applicable
4683 if ( captureAudioClient ) {
4684 hr = captureAudioClient->GetMixFormat( &captureFormat );
4685 if ( FAILED( hr ) ) {
4686 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4690 // initialize capture stream according to desire buffer size
4691 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4693 if ( !captureClient ) {
4694 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4695 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4696 desiredBufferPeriod,
4697 desiredBufferPeriod,
4700 if ( FAILED( hr ) ) {
4701 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4705 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4706 ( void** ) &captureClient );
4707 if ( FAILED( hr ) ) {
4708 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4712 // configure captureEvent to trigger on every available capture buffer
4713 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4714 if ( !captureEvent ) {
4715 errorType = RtAudioError::SYSTEM_ERROR;
4716 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4720 hr = captureAudioClient->SetEventHandle( captureEvent );
4721 if ( FAILED( hr ) ) {
4722 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4726 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4727 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4730 unsigned int inBufferSize = 0;
4731 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4732 if ( FAILED( hr ) ) {
4733 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4737 // scale outBufferSize according to stream->user sample rate ratio
4738 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4739 inBufferSize *= stream_.nDeviceChannels[INPUT];
4741 // set captureBuffer size
4742 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4744 // reset the capture stream
4745 hr = captureAudioClient->Reset();
4746 if ( FAILED( hr ) ) {
4747 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4751 // start the capture stream
4752 hr = captureAudioClient->Start();
4753 if ( FAILED( hr ) ) {
4754 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4759 // start render stream if applicable
4760 if ( renderAudioClient ) {
4761 hr = renderAudioClient->GetMixFormat( &renderFormat );
4762 if ( FAILED( hr ) ) {
4763 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4767 // initialize render stream according to desire buffer size
4768 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4770 if ( !renderClient ) {
4771 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4772 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4773 desiredBufferPeriod,
4774 desiredBufferPeriod,
4777 if ( FAILED( hr ) ) {
4778 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4782 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4783 ( void** ) &renderClient );
4784 if ( FAILED( hr ) ) {
4785 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4789 // configure renderEvent to trigger on every available render buffer
4790 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4791 if ( !renderEvent ) {
4792 errorType = RtAudioError::SYSTEM_ERROR;
4793 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4797 hr = renderAudioClient->SetEventHandle( renderEvent );
4798 if ( FAILED( hr ) ) {
4799 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4803 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4804 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4807 unsigned int outBufferSize = 0;
4808 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4809 if ( FAILED( hr ) ) {
4810 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4814 // scale inBufferSize according to user->stream sample rate ratio
4815 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4816 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4818 // set renderBuffer size
4819 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4821 // reset the render stream
4822 hr = renderAudioClient->Reset();
4823 if ( FAILED( hr ) ) {
4824 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4828 // start the render stream
4829 hr = renderAudioClient->Start();
4830 if ( FAILED( hr ) ) {
4831 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4836 if ( stream_.mode == INPUT ) {
4837 using namespace std; // for roundf
4838 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4840 else if ( stream_.mode == OUTPUT ) {
4841 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4843 else if ( stream_.mode == DUPLEX ) {
4844 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4845 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4848 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4849 if ( !stream_.deviceBuffer ) {
4850 errorType = RtAudioError::MEMORY_ERROR;
4851 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4855 // stream process loop
4856 while ( stream_.state != STREAM_STOPPING ) {
4857 if ( !callbackPulled ) {
4860 // 1. Pull callback buffer from inputBuffer
4861 // 2. If 1. was successful: Convert callback buffer to user format
4863 if ( captureAudioClient ) {
4864 // Pull callback buffer from inputBuffer
4865 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4866 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4867 stream_.deviceFormat[INPUT] );
4869 if ( callbackPulled ) {
4870 if ( stream_.doConvertBuffer[INPUT] ) {
4871 // Convert callback buffer to user format
4872 convertBuffer( stream_.userBuffer[INPUT],
4873 stream_.deviceBuffer,
4874 stream_.convertInfo[INPUT] );
4877 // no further conversion, simple copy deviceBuffer to userBuffer
4878 memcpy( stream_.userBuffer[INPUT],
4879 stream_.deviceBuffer,
4880 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4885 // if there is no capture stream, set callbackPulled flag
4886 callbackPulled = true;
4891 // 1. Execute user callback method
4892 // 2. Handle return value from callback
4894 // if callback has not requested the stream to stop
4895 if ( callbackPulled && !callbackStopped ) {
4896 // Execute user callback method
4897 callbackResult = callback( stream_.userBuffer[OUTPUT],
4898 stream_.userBuffer[INPUT],
4901 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4902 stream_.callbackInfo.userData );
4904 // Handle return value from callback
4905 if ( callbackResult == 1 ) {
4906 // instantiate a thread to stop this thread
4907 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4908 if ( !threadHandle ) {
4909 errorType = RtAudioError::THREAD_ERROR;
4910 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4913 else if ( !CloseHandle( threadHandle ) ) {
4914 errorType = RtAudioError::THREAD_ERROR;
4915 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4919 callbackStopped = true;
4921 else if ( callbackResult == 2 ) {
4922 // instantiate a thread to stop this thread
4923 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4924 if ( !threadHandle ) {
4925 errorType = RtAudioError::THREAD_ERROR;
4926 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4929 else if ( !CloseHandle( threadHandle ) ) {
4930 errorType = RtAudioError::THREAD_ERROR;
4931 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4935 callbackStopped = true;
4942 // 1. Convert callback buffer to stream format
4943 // 2. Push callback buffer into outputBuffer
4945 if ( renderAudioClient && callbackPulled ) {
4946 if ( stream_.doConvertBuffer[OUTPUT] ) {
4947 // Convert callback buffer to stream format
4948 convertBuffer( stream_.deviceBuffer,
4949 stream_.userBuffer[OUTPUT],
4950 stream_.convertInfo[OUTPUT] );
4954 // Push callback buffer into outputBuffer
4955 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
4956 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
4957 stream_.deviceFormat[OUTPUT] );
4960 // if there is no render stream, set callbackPushed flag
4961 callbackPushed = true;
4966 // 1. Get capture buffer from stream
4967 // 2. Push capture buffer into inputBuffer
4968 // 3. If 2. was successful: Release capture buffer
4970 if ( captureAudioClient ) {
4971 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
4972 if ( !callbackPulled ) {
4973 WaitForSingleObject( captureEvent, INFINITE );
4976 // Get capture buffer from stream
4977 hr = captureClient->GetBuffer( &streamBuffer,
4979 &captureFlags, NULL, NULL );
4980 if ( FAILED( hr ) ) {
4981 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
4985 if ( bufferFrameCount != 0 ) {
4986 // Push capture buffer into inputBuffer
4987 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
4988 bufferFrameCount * stream_.nDeviceChannels[INPUT],
4989 stream_.deviceFormat[INPUT] ) )
4991 // Release capture buffer
4992 hr = captureClient->ReleaseBuffer( bufferFrameCount );
4993 if ( FAILED( hr ) ) {
4994 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5000 // Inform WASAPI that capture was unsuccessful
5001 hr = captureClient->ReleaseBuffer( 0 );
5002 if ( FAILED( hr ) ) {
5003 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5010 // Inform WASAPI that capture was unsuccessful
5011 hr = captureClient->ReleaseBuffer( 0 );
5012 if ( FAILED( hr ) ) {
5013 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5021 // 1. Get render buffer from stream
5022 // 2. Pull next buffer from outputBuffer
5023 // 3. If 2. was successful: Fill render buffer with next buffer
5024 // Release render buffer
5026 if ( renderAudioClient ) {
5027 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5028 if ( callbackPulled && !callbackPushed ) {
5029 WaitForSingleObject( renderEvent, INFINITE );
5032 // Get render buffer from stream
5033 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5034 if ( FAILED( hr ) ) {
5035 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5039 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5040 if ( FAILED( hr ) ) {
5041 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5045 bufferFrameCount -= numFramesPadding;
5047 if ( bufferFrameCount != 0 ) {
5048 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5049 if ( FAILED( hr ) ) {
5050 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5054 // Pull next buffer from outputBuffer
5055 // Fill render buffer with next buffer
5056 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5057 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5058 stream_.deviceFormat[OUTPUT] ) )
5060 // Release render buffer
5061 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5062 if ( FAILED( hr ) ) {
5063 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5069 // Inform WASAPI that render was unsuccessful
5070 hr = renderClient->ReleaseBuffer( 0, 0 );
5071 if ( FAILED( hr ) ) {
5072 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5079 // Inform WASAPI that render was unsuccessful
5080 hr = renderClient->ReleaseBuffer( 0, 0 );
5081 if ( FAILED( hr ) ) {
5082 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5088 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5089 if ( callbackPushed ) {
5090 callbackPulled = false;
5092 RtApi::tickStreamTime();
5099 CoTaskMemFree( captureFormat );
5100 CoTaskMemFree( renderFormat );
5104 // update stream state
5105 stream_.state = STREAM_STOPPED;
5107 if ( errorText_.empty() )
5113 //******************** End of __WINDOWS_WASAPI__ *********************//
5117 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5119 // Modified by Robin Davies, October 2005
5120 // - Improvements to DirectX pointer chasing.
5121 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5122 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5123 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5124 // Changed device query structure for RtAudio 4.0.7, January 2010
5126 #include <windows.h>
5127 #include <process.h>
5128 #include <mmsystem.h>
5132 #include <algorithm>
5134 #if defined(__MINGW32__)
5135 // missing from latest mingw winapi
5136 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5137 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5138 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5139 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5142 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5144 #ifdef _MSC_VER // if Microsoft Visual C++
5145 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5148 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5150 if ( pointer > bufferSize ) pointer -= bufferSize;
5151 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5152 if ( pointer < earlierPointer ) pointer += bufferSize;
5153 return pointer >= earlierPointer && pointer < laterPointer;
5156 // A structure to hold various information related to the DirectSound
5157 // API implementation.
5159 unsigned int drainCounter; // Tracks callback counts when draining
5160 bool internalDrain; // Indicates if stop is initiated from callback or not.
5164 UINT bufferPointer[2];
5165 DWORD dsBufferSize[2];
5166 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5170 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5173 // Declarations for utility functions, callbacks, and structures
5174 // specific to the DirectSound implementation.
5175 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5176 LPCTSTR description,
5180 static const char* getErrorString( int code );
5182 static unsigned __stdcall callbackHandler( void *ptr );
5191 : found(false) { validId[0] = false; validId[1] = false; }
5194 struct DsProbeData {
5196 std::vector<struct DsDevice>* dsDevices;
5199 RtApiDs :: RtApiDs()
5201 // Dsound will run both-threaded. If CoInitialize fails, then just
5202 // accept whatever the mainline chose for a threading model.
5203 coInitialized_ = false;
5204 HRESULT hr = CoInitialize( NULL );
5205 if ( !FAILED( hr ) ) coInitialized_ = true;
5208 RtApiDs :: ~RtApiDs()
5210 if ( stream_.state != STREAM_CLOSED ) closeStream();
5211 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5214 // The DirectSound default output is always the first device.
5215 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5220 // The DirectSound default input is always the first input device,
5221 // which is the first capture device enumerated.
5222 unsigned int RtApiDs :: getDefaultInputDevice( void )
5227 unsigned int RtApiDs :: getDeviceCount( void )
5229 // Set query flag for previously found devices to false, so that we
5230 // can check for any devices that have disappeared.
5231 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5232 dsDevices[i].found = false;
5234 // Query DirectSound devices.
5235 struct DsProbeData probeInfo;
5236 probeInfo.isInput = false;
5237 probeInfo.dsDevices = &dsDevices;
5238 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5239 if ( FAILED( result ) ) {
5240 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5241 errorText_ = errorStream_.str();
5242 error( RtAudioError::WARNING );
5245 // Query DirectSoundCapture devices.
5246 probeInfo.isInput = true;
5247 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5248 if ( FAILED( result ) ) {
5249 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5250 errorText_ = errorStream_.str();
5251 error( RtAudioError::WARNING );
5254 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5255 for ( unsigned int i=0; i<dsDevices.size(); ) {
5256 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5260 return static_cast<unsigned int>(dsDevices.size());
5263 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5265 RtAudio::DeviceInfo info;
5266 info.probed = false;
5268 if ( dsDevices.size() == 0 ) {
5269 // Force a query of all devices
5271 if ( dsDevices.size() == 0 ) {
5272 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5273 error( RtAudioError::INVALID_USE );
5278 if ( device >= dsDevices.size() ) {
5279 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5280 error( RtAudioError::INVALID_USE );
5285 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5287 LPDIRECTSOUND output;
5289 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5290 if ( FAILED( result ) ) {
5291 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5292 errorText_ = errorStream_.str();
5293 error( RtAudioError::WARNING );
5297 outCaps.dwSize = sizeof( outCaps );
5298 result = output->GetCaps( &outCaps );
5299 if ( FAILED( result ) ) {
5301 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5302 errorText_ = errorStream_.str();
5303 error( RtAudioError::WARNING );
5307 // Get output channel information.
5308 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5310 // Get sample rate information.
5311 info.sampleRates.clear();
5312 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5313 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5314 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5315 info.sampleRates.push_back( SAMPLE_RATES[k] );
5317 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5318 info.preferredSampleRate = SAMPLE_RATES[k];
5322 // Get format information.
5323 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5324 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5328 if ( getDefaultOutputDevice() == device )
5329 info.isDefaultOutput = true;
5331 if ( dsDevices[ device ].validId[1] == false ) {
5332 info.name = dsDevices[ device ].name;
5339 LPDIRECTSOUNDCAPTURE input;
5340 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5341 if ( FAILED( result ) ) {
5342 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5343 errorText_ = errorStream_.str();
5344 error( RtAudioError::WARNING );
5349 inCaps.dwSize = sizeof( inCaps );
5350 result = input->GetCaps( &inCaps );
5351 if ( FAILED( result ) ) {
5353 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5354 errorText_ = errorStream_.str();
5355 error( RtAudioError::WARNING );
5359 // Get input channel information.
5360 info.inputChannels = inCaps.dwChannels;
5362 // Get sample rate and format information.
5363 std::vector<unsigned int> rates;
5364 if ( inCaps.dwChannels >= 2 ) {
5365 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5366 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5367 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5368 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5369 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5370 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5371 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5372 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5374 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5375 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5376 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5377 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5378 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5380 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5381 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5382 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5383 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5384 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5387 else if ( inCaps.dwChannels == 1 ) {
5388 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5389 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5390 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5391 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5392 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5393 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5394 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5395 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5397 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5398 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5399 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5400 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5401 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5403 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5404 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5405 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5406 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5407 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5410 else info.inputChannels = 0; // technically, this would be an error
5414 if ( info.inputChannels == 0 ) return info;
5416 // Copy the supported rates to the info structure but avoid duplication.
5418 for ( unsigned int i=0; i<rates.size(); i++ ) {
5420 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5421 if ( rates[i] == info.sampleRates[j] ) {
5426 if ( found == false ) info.sampleRates.push_back( rates[i] );
5428 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5430 // If device opens for both playback and capture, we determine the channels.
5431 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5432 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5434 if ( device == 0 ) info.isDefaultInput = true;
5436 // Copy name and return.
5437 info.name = dsDevices[ device ].name;
5442 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5443 unsigned int firstChannel, unsigned int sampleRate,
5444 RtAudioFormat format, unsigned int *bufferSize,
5445 RtAudio::StreamOptions *options )
5447 if ( channels + firstChannel > 2 ) {
5448 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5452 size_t nDevices = dsDevices.size();
5453 if ( nDevices == 0 ) {
5454 // This should not happen because a check is made before this function is called.
5455 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5459 if ( device >= nDevices ) {
5460 // This should not happen because a check is made before this function is called.
5461 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5465 if ( mode == OUTPUT ) {
5466 if ( dsDevices[ device ].validId[0] == false ) {
5467 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5468 errorText_ = errorStream_.str();
5472 else { // mode == INPUT
5473 if ( dsDevices[ device ].validId[1] == false ) {
5474 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5475 errorText_ = errorStream_.str();
5480 // According to a note in PortAudio, using GetDesktopWindow()
5481 // instead of GetForegroundWindow() is supposed to avoid problems
5482 // that occur when the application's window is not the foreground
5483 // window. Also, if the application window closes before the
5484 // DirectSound buffer, DirectSound can crash. In the past, I had
5485 // problems when using GetDesktopWindow() but it seems fine now
5486 // (January 2010). I'll leave it commented here.
5487 // HWND hWnd = GetForegroundWindow();
5488 HWND hWnd = GetDesktopWindow();
5490 // Check the numberOfBuffers parameter and limit the lowest value to
5491 // two. This is a judgement call and a value of two is probably too
5492 // low for capture, but it should work for playback.
5494 if ( options ) nBuffers = options->numberOfBuffers;
5495 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5496 if ( nBuffers < 2 ) nBuffers = 3;
5498 // Check the lower range of the user-specified buffer size and set
5499 // (arbitrarily) to a lower bound of 32.
5500 if ( *bufferSize < 32 ) *bufferSize = 32;
5502 // Create the wave format structure. The data format setting will
5503 // be determined later.
5504 WAVEFORMATEX waveFormat;
5505 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5506 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5507 waveFormat.nChannels = channels + firstChannel;
5508 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5510 // Determine the device buffer size. By default, we'll use the value
5511 // defined above (32K), but we will grow it to make allowances for
5512 // very large software buffer sizes.
5513 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5514 DWORD dsPointerLeadTime = 0;
5516 void *ohandle = 0, *bhandle = 0;
5518 if ( mode == OUTPUT ) {
5520 LPDIRECTSOUND output;
5521 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5522 if ( FAILED( result ) ) {
5523 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5524 errorText_ = errorStream_.str();
5529 outCaps.dwSize = sizeof( outCaps );
5530 result = output->GetCaps( &outCaps );
5531 if ( FAILED( result ) ) {
5533 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5534 errorText_ = errorStream_.str();
5538 // Check channel information.
5539 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5540 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5541 errorText_ = errorStream_.str();
5545 // Check format information. Use 16-bit format unless not
5546 // supported or user requests 8-bit.
5547 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5548 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5549 waveFormat.wBitsPerSample = 16;
5550 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5553 waveFormat.wBitsPerSample = 8;
5554 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5556 stream_.userFormat = format;
5558 // Update wave format structure and buffer information.
5559 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5560 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5561 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5563 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5564 while ( dsPointerLeadTime * 2U > dsBufferSize )
5567 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5568 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5569 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5570 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5571 if ( FAILED( result ) ) {
5573 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5574 errorText_ = errorStream_.str();
5578 // Even though we will write to the secondary buffer, we need to
5579 // access the primary buffer to set the correct output format
5580 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5581 // buffer description.
5582 DSBUFFERDESC bufferDescription;
5583 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5584 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5585 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5587 // Obtain the primary buffer
5588 LPDIRECTSOUNDBUFFER buffer;
5589 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5590 if ( FAILED( result ) ) {
5592 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5593 errorText_ = errorStream_.str();
5597 // Set the primary DS buffer sound format.
5598 result = buffer->SetFormat( &waveFormat );
5599 if ( FAILED( result ) ) {
5601 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5602 errorText_ = errorStream_.str();
5606 // Setup the secondary DS buffer description.
5607 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5608 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5609 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5610 DSBCAPS_GLOBALFOCUS |
5611 DSBCAPS_GETCURRENTPOSITION2 |
5612 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5613 bufferDescription.dwBufferBytes = dsBufferSize;
5614 bufferDescription.lpwfxFormat = &waveFormat;
5616 // Try to create the secondary DS buffer. If that doesn't work,
5617 // try to use software mixing. Otherwise, there's a problem.
5618 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5619 if ( FAILED( result ) ) {
5620 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5621 DSBCAPS_GLOBALFOCUS |
5622 DSBCAPS_GETCURRENTPOSITION2 |
5623 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5624 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5625 if ( FAILED( result ) ) {
5627 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5628 errorText_ = errorStream_.str();
5633 // Get the buffer size ... might be different from what we specified.
5635 dsbcaps.dwSize = sizeof( DSBCAPS );
5636 result = buffer->GetCaps( &dsbcaps );
5637 if ( FAILED( result ) ) {
5640 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5641 errorText_ = errorStream_.str();
5645 dsBufferSize = dsbcaps.dwBufferBytes;
5647 // Lock the DS buffer
5650 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5651 if ( FAILED( result ) ) {
5654 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5655 errorText_ = errorStream_.str();
5659 // Zero the DS buffer
5660 ZeroMemory( audioPtr, dataLen );
5662 // Unlock the DS buffer
5663 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5664 if ( FAILED( result ) ) {
5667 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5668 errorText_ = errorStream_.str();
5672 ohandle = (void *) output;
5673 bhandle = (void *) buffer;
5676 if ( mode == INPUT ) {
5678 LPDIRECTSOUNDCAPTURE input;
5679 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5680 if ( FAILED( result ) ) {
5681 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5682 errorText_ = errorStream_.str();
5687 inCaps.dwSize = sizeof( inCaps );
5688 result = input->GetCaps( &inCaps );
5689 if ( FAILED( result ) ) {
5691 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5692 errorText_ = errorStream_.str();
5696 // Check channel information.
5697 if ( inCaps.dwChannels < channels + firstChannel ) {
5698 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5702 // Check format information. Use 16-bit format unless user
5704 DWORD deviceFormats;
5705 if ( channels + firstChannel == 2 ) {
5706 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5707 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5708 waveFormat.wBitsPerSample = 8;
5709 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5711 else { // assume 16-bit is supported
5712 waveFormat.wBitsPerSample = 16;
5713 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5716 else { // channel == 1
5717 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5718 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5719 waveFormat.wBitsPerSample = 8;
5720 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5722 else { // assume 16-bit is supported
5723 waveFormat.wBitsPerSample = 16;
5724 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5727 stream_.userFormat = format;
5729 // Update wave format structure and buffer information.
5730 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5731 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5732 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5734 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5735 while ( dsPointerLeadTime * 2U > dsBufferSize )
5738 // Setup the secondary DS buffer description.
5739 DSCBUFFERDESC bufferDescription;
5740 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5741 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5742 bufferDescription.dwFlags = 0;
5743 bufferDescription.dwReserved = 0;
5744 bufferDescription.dwBufferBytes = dsBufferSize;
5745 bufferDescription.lpwfxFormat = &waveFormat;
5747 // Create the capture buffer.
5748 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5749 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5750 if ( FAILED( result ) ) {
5752 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5753 errorText_ = errorStream_.str();
5757 // Get the buffer size ... might be different from what we specified.
5759 dscbcaps.dwSize = sizeof( DSCBCAPS );
5760 result = buffer->GetCaps( &dscbcaps );
5761 if ( FAILED( result ) ) {
5764 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5765 errorText_ = errorStream_.str();
5769 dsBufferSize = dscbcaps.dwBufferBytes;
5771 // NOTE: We could have a problem here if this is a duplex stream
5772 // and the play and capture hardware buffer sizes are different
5773 // (I'm actually not sure if that is a problem or not).
5774 // Currently, we are not verifying that.
5776 // Lock the capture buffer
5779 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5780 if ( FAILED( result ) ) {
5783 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5784 errorText_ = errorStream_.str();
5789 ZeroMemory( audioPtr, dataLen );
5791 // Unlock the buffer
5792 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5793 if ( FAILED( result ) ) {
5796 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5797 errorText_ = errorStream_.str();
5801 ohandle = (void *) input;
5802 bhandle = (void *) buffer;
5805 // Set various stream parameters
5806 DsHandle *handle = 0;
5807 stream_.nDeviceChannels[mode] = channels + firstChannel;
5808 stream_.nUserChannels[mode] = channels;
5809 stream_.bufferSize = *bufferSize;
5810 stream_.channelOffset[mode] = firstChannel;
5811 stream_.deviceInterleaved[mode] = true;
5812 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5813 else stream_.userInterleaved = true;
5815 // Set flag for buffer conversion
5816 stream_.doConvertBuffer[mode] = false;
5817 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5818 stream_.doConvertBuffer[mode] = true;
5819 if (stream_.userFormat != stream_.deviceFormat[mode])
5820 stream_.doConvertBuffer[mode] = true;
5821 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5822 stream_.nUserChannels[mode] > 1 )
5823 stream_.doConvertBuffer[mode] = true;
5825 // Allocate necessary internal buffers
5826 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5827 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5828 if ( stream_.userBuffer[mode] == NULL ) {
5829 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5833 if ( stream_.doConvertBuffer[mode] ) {
5835 bool makeBuffer = true;
5836 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5837 if ( mode == INPUT ) {
5838 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5839 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5840 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5845 bufferBytes *= *bufferSize;
5846 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5847 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5848 if ( stream_.deviceBuffer == NULL ) {
5849 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5855 // Allocate our DsHandle structures for the stream.
5856 if ( stream_.apiHandle == 0 ) {
5858 handle = new DsHandle;
5860 catch ( std::bad_alloc& ) {
5861 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5865 // Create a manual-reset event.
5866 handle->condition = CreateEvent( NULL, // no security
5867 TRUE, // manual-reset
5868 FALSE, // non-signaled initially
5870 stream_.apiHandle = (void *) handle;
5873 handle = (DsHandle *) stream_.apiHandle;
5874 handle->id[mode] = ohandle;
5875 handle->buffer[mode] = bhandle;
5876 handle->dsBufferSize[mode] = dsBufferSize;
5877 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5879 stream_.device[mode] = device;
5880 stream_.state = STREAM_STOPPED;
5881 if ( stream_.mode == OUTPUT && mode == INPUT )
5882 // We had already set up an output stream.
5883 stream_.mode = DUPLEX;
5885 stream_.mode = mode;
5886 stream_.nBuffers = nBuffers;
5887 stream_.sampleRate = sampleRate;
5889 // Setup the buffer conversion information structure.
5890 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5892 // Setup the callback thread.
5893 if ( stream_.callbackInfo.isRunning == false ) {
5895 stream_.callbackInfo.isRunning = true;
5896 stream_.callbackInfo.object = (void *) this;
5897 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5898 &stream_.callbackInfo, 0, &threadId );
5899 if ( stream_.callbackInfo.thread == 0 ) {
5900 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5904 // Boost DS thread priority
5905 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5911 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5912 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5913 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5914 if ( buffer ) buffer->Release();
5917 if ( handle->buffer[1] ) {
5918 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5919 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5920 if ( buffer ) buffer->Release();
5923 CloseHandle( handle->condition );
5925 stream_.apiHandle = 0;
5928 for ( int i=0; i<2; i++ ) {
5929 if ( stream_.userBuffer[i] ) {
5930 free( stream_.userBuffer[i] );
5931 stream_.userBuffer[i] = 0;
5935 if ( stream_.deviceBuffer ) {
5936 free( stream_.deviceBuffer );
5937 stream_.deviceBuffer = 0;
5940 stream_.state = STREAM_CLOSED;
// Close an open DirectSound stream: stop the callback thread, release the
// DirectSound playback/capture buffers and device objects, free internal
// user/device buffers, and mark the stream closed.
// NOTE(review): this chunk is a line-numbered paste with lines dropped (the
// embedded numbering jumps, e.g. 5961->5968); closing braces, returns, and the
// buffer Stop()/Release() calls inside the two if-blocks are missing here.
5944 void RtApiDs :: closeStream()
// Guard: warn (do not throw) if there is nothing to close.
5946 if ( stream_.state == STREAM_CLOSED ) {
5947 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
5948 error( RtAudioError::WARNING );
// Signal the callback thread to exit, then wait for it and reclaim the handle.
5952 // Stop the callback thread.
5953 stream_.callbackInfo.isRunning = false;
5954 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
5955 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
5957 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release the playback-side COM objects (index 0 = output).
5959 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5960 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5961 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release the capture-side COM objects (index 1 = input).
5968 if ( handle->buffer[1] ) {
5969 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5970 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the signaling event and drop the API-specific handle.
5977 CloseHandle( handle->condition );
5979 stream_.apiHandle = 0;
// Free per-mode user buffers (0 = output, 1 = input).
5982 for ( int i=0; i<2; i++ ) {
5983 if ( stream_.userBuffer[i] ) {
5984 free( stream_.userBuffer[i] );
5985 stream_.userBuffer[i] = 0;
// Free the shared conversion (device) buffer, if allocated.
5989 if ( stream_.deviceBuffer ) {
5990 free( stream_.deviceBuffer );
5991 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed state.
5994 stream_.mode = UNINITIALIZED;
5995 stream_.state = STREAM_CLOSED;
// Start a stopped DirectSound stream: boost the Windows timer resolution,
// set up duplex pre-roll, start the playback and/or capture buffers in
// looping mode, and mark the stream running.
// NOTE(review): line-numbered paste with dropped lines (embedded numbering
// jumps); some closing braces / goto-unlock lines are not visible here.
5998 void RtApiDs :: startStream()
// Guard: warn (do not throw) if already running.
6001 if ( stream_.state == STREAM_RUNNING ) {
6002 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6003 error( RtAudioError::WARNING );
6007 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6009 // Increase scheduler frequency on lesser windows (a side-effect of
6010 // increasing timer accuracy). On greater windows (Win2K or later),
6011 // this is already in effect.
6012 timeBeginPeriod( 1 );
// Reset the duplex-synchronization state used by callbackEvent().
6014 buffersRolling = false;
6015 duplexPrerollBytes = 0;
6017 if ( stream_.mode == DUPLEX ) {
6018 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6019 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start the output (playback) buffer looping, if this stream plays.
6023 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6025 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6026 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6027 if ( FAILED( result ) ) {
6028 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6029 errorText_ = errorStream_.str();
// Start the input (capture) buffer looping, if this stream records.
6034 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6036 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6037 result = buffer->Start( DSCBSTART_LOOPING );
6038 if ( FAILED( result ) ) {
6039 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6040 errorText_ = errorStream_.str();
// Clear drain state, reset the stop-signal event, and go live.
6045 handle->drainCounter = 0;
6046 handle->internalDrain = false;
6047 ResetEvent( handle->condition );
6048 stream_.state = STREAM_RUNNING;
// Report any accumulated failure from the start attempts above.
6051 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running DirectSound stream: optionally wait for output drain, stop
// the playback/capture buffers, zero their contents so a restart does not
// replay stale audio, rewind the buffer pointers, and restore the scheduler.
// NOTE(review): line-numbered paste with dropped lines; some closing braces,
// a few declarations (result/audioPtr/dataLen), and goto-unlock lines are
// missing from this view.
6054 void RtApiDs :: stopStream()
// Guard: warn (do not throw) if already stopped.
6057 if ( stream_.state == STREAM_STOPPED ) {
6058 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6059 error( RtAudioError::WARNING );
6066 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Output side: let the callback drain pending audio before stopping.
6067 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6068 if ( handle->drainCounter == 0 ) {
6069 handle->drainCounter = 2;
6070 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6073 stream_.state = STREAM_STOPPED;
6075 MUTEX_LOCK( &stream_.mutex );
6077 // Stop the buffer and clear memory
6078 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6079 result = buffer->Stop();
6080 if ( FAILED( result ) ) {
6081 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6082 errorText_ = errorStream_.str();
6086 // Lock the buffer and clear it so that if we start to play again,
6087 // we won't have old data playing.
6088 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6089 if ( FAILED( result ) ) {
6090 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6091 errorText_ = errorStream_.str();
6095 // Zero the DS buffer
6096 ZeroMemory( audioPtr, dataLen );
6098 // Unlock the DS buffer
6099 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6100 if ( FAILED( result ) ) {
6101 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6102 errorText_ = errorStream_.str();
6106 // If we start playing again, we must begin at beginning of buffer.
6107 handle->bufferPointer[0] = 0;
// Input side: stop and clear the capture buffer the same way.
6110 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6111 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6115 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken on the output path above.
6117 if ( stream_.mode != DUPLEX )
6118 MUTEX_LOCK( &stream_.mutex );
6120 result = buffer->Stop();
6121 if ( FAILED( result ) ) {
6122 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6123 errorText_ = errorStream_.str();
6127 // Lock the buffer and clear it so that if we start to play again,
6128 // we won't have old data playing.
6129 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6130 if ( FAILED( result ) ) {
6131 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6132 errorText_ = errorStream_.str();
6136 // Zero the DS buffer
6137 ZeroMemory( audioPtr, dataLen );
6139 // Unlock the DS buffer
6140 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6141 if ( FAILED( result ) ) {
6142 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6143 errorText_ = errorStream_.str();
6147 // If we start recording again, we must begin at beginning of buffer.
6148 handle->bufferPointer[1] = 0;
// Restore normal timer resolution (pairs with timeBeginPeriod in startStream)
// and release the stream mutex before reporting any failure.
6152 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6153 MUTEX_UNLOCK( &stream_.mutex );
6155 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream immediately: set the drain counter so that the
// callback writes silence and stops without waiting for queued output.
// NOTE(review): dropped lines here (embedded numbering jumps 6163->6167);
// presumably a return after the warning and a stopStream() call follow —
// confirm against the canonical source.
6158 void RtApiDs :: abortStream()
6161 if ( stream_.state == STREAM_STOPPED ) {
6162 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6163 error( RtAudioError::WARNING );
6167 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// drainCounter == 2 tells callbackEvent to output zeros and begin stopping.
6168 handle->drainCounter = 2;
// One iteration of the DirectSound streaming engine, invoked repeatedly by
// the callback thread: fetches fresh audio from the user callback, copies it
// into the circular playback buffer ahead of the play cursor, and/or reads
// captured audio from behind the capture cursor, handling duplex pointer
// synchronization, pre-roll, under/overflow detection and drain/stop logic.
// NOTE(review): line-numbered paste with dropped lines throughout (embedded
// numbering jumps); several braces, declarations and MUTEX/return lines are
// missing from this view. Also "&curren" in "&currentWritePointer" /
// "&currentReadPointer" was collapsed to the "¤" character by entity
// mis-decoding at four GetCurrentPosition calls below — must be restored to
// "&currentWritePointer" / "&currentReadPointer" before this compiles.
6173 void RtApiDs :: callbackEvent()
// If stopped/stopping, idle briefly instead of doing buffer work.
6175 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6176 Sleep( 50 ); // sleep 50 milliseconds
6180 if ( stream_.state == STREAM_CLOSED ) {
6181 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6182 error( RtAudioError::WARNING );
6186 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6187 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6189 // Check if we were draining the stream and signal is finished.
6190 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6192 stream_.state = STREAM_STOPPING;
// Only signal the condition if stopStream() is blocked waiting on it.
6193 if ( handle->internalDrain == false )
6194 SetEvent( handle->condition );
6200 // Invoke user callback to get fresh output data UNLESS we are
// draining (drainCounter != 0), in which case silence is written instead.
6202 if ( handle->drainCounter == 0 ) {
6203 RtAudioCallback callback = (RtAudioCallback) info->callback;
6204 double streamTime = getStreamTime();
6205 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags recorded by earlier iterations.
6206 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6207 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6208 handle->xrun[0] = false;
6210 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6211 status |= RTAUDIO_INPUT_OVERFLOW;
6212 handle->xrun[1] = false;
6214 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6215 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 => abort immediately; 1 => drain then stop.
6216 if ( cbReturnValue == 2 ) {
6217 stream_.state = STREAM_STOPPING;
6218 handle->drainCounter = 2;
6222 else if ( cbReturnValue == 1 ) {
6223 handle->drainCounter = 1;
6224 handle->internalDrain = true;
// Working variables for the DirectSound cursor arithmetic below.
6229 DWORD currentWritePointer, safeWritePointer;
6230 DWORD currentReadPointer, safeReadPointer;
6231 UINT nextWritePointer;
6233 LPVOID buffer1 = NULL;
6234 LPVOID buffer2 = NULL;
6235 DWORD bufferSize1 = 0;
6236 DWORD bufferSize2 = 0;
// The stream mutex protects the handle/buffer state against stopStream().
6241 MUTEX_LOCK( &stream_.mutex );
6242 if ( stream_.state == STREAM_STOPPED ) {
6243 MUTEX_UNLOCK( &stream_.mutex );
// First pass after startStream(): wait for the hardware cursors to move,
// then seed our software buffer pointers from the observed positions.
6247 if ( buffersRolling == false ) {
6248 if ( stream_.mode == DUPLEX ) {
6249 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6251 // It takes a while for the devices to get rolling. As a result,
6252 // there's no guarantee that the capture and write device pointers
6253 // will move in lockstep. Wait here for both devices to start
6254 // rolling, and then set our buffer pointers accordingly.
6255 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6256 // bytes later than the write buffer.
6258 // Stub: a serious risk of having a pre-emptive scheduling round
6259 // take place between the two GetCurrentPosition calls... but I'm
6260 // really not sure how to solve the problem. Temporarily boost to
6261 // Realtime priority, maybe; but I'm not sure what priority the
6262 // DirectSound service threads run at. We *should* be roughly
6263 // within a ms or so of correct.
6265 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6266 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6268 DWORD startSafeWritePointer, startSafeReadPointer;
// Record the initial cursor positions of both devices...
6270 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6271 if ( FAILED( result ) ) {
6272 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6273 errorText_ = errorStream_.str();
6274 MUTEX_UNLOCK( &stream_.mutex );
6275 error( RtAudioError::SYSTEM_ERROR );
6278 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6279 if ( FAILED( result ) ) {
6280 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6281 errorText_ = errorStream_.str();
6282 MUTEX_UNLOCK( &stream_.mutex );
6283 error( RtAudioError::SYSTEM_ERROR );
// ...then poll (loop body; opening of the poll loop dropped from this view)
// until BOTH cursors have advanced from their starting positions.
6287 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6288 if ( FAILED( result ) ) {
6289 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6290 errorText_ = errorStream_.str();
6291 MUTEX_UNLOCK( &stream_.mutex );
6292 error( RtAudioError::SYSTEM_ERROR );
6295 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6296 if ( FAILED( result ) ) {
6297 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6298 errorText_ = errorStream_.str();
6299 MUTEX_UNLOCK( &stream_.mutex );
6300 error( RtAudioError::SYSTEM_ERROR );
6303 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6307 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
// Write pointer leads the safe-write cursor by the configured lead time;
// wrap it modulo the buffer size.
6309 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6310 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6311 handle->bufferPointer[1] = safeReadPointer;
6313 else if ( stream_.mode == OUTPUT ) {
6315 // Set the proper nextWritePosition after initial startup.
6316 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// NOTE(review): "¤tWritePointer" is entity-corrupted "&currentWritePointer".
6317 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6318 if ( FAILED( result ) ) {
6319 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6320 errorText_ = errorStream_.str();
6321 MUTEX_UNLOCK( &stream_.mutex );
6322 error( RtAudioError::SYSTEM_ERROR );
6325 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6326 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6329 buffersRolling = true;
// ---------------- Playback path ----------------
6332 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6334 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6336 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6337 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6338 bufferBytes *= formatBytes( stream_.userFormat );
6339 memset( stream_.userBuffer[0], 0, bufferBytes );
6342 // Setup parameters and do buffer conversion if necessary.
6343 if ( stream_.doConvertBuffer[0] ) {
6344 buffer = stream_.deviceBuffer;
6345 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6346 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6347 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6350 buffer = stream_.userBuffer[0];
6351 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6352 bufferBytes *= formatBytes( stream_.userFormat );
6355 // No byte swapping necessary in DirectSound implementation.
6357 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6358 // unsigned. So, we need to convert our signed 8-bit data here to
// unsigned (offset-binary) before handing it to the device.
6360 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6361 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6363 DWORD dsBufferSize = handle->dsBufferSize[0];
6364 nextWritePointer = handle->bufferPointer[0];
6366 DWORD endWrite, leadPointer;
// Wait-loop (opening dropped from this view): spin/sleep until the write
// region [nextWritePointer, endWrite) is entirely behind the lead pointer.
6368 // Find out where the read and "safe write" pointers are.
// NOTE(review): "¤tWritePointer" is entity-corrupted "&currentWritePointer".
6369 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6370 if ( FAILED( result ) ) {
6371 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6372 errorText_ = errorStream_.str();
6373 MUTEX_UNLOCK( &stream_.mutex );
6374 error( RtAudioError::SYSTEM_ERROR );
6378 // We will copy our output buffer into the region between
6379 // safeWritePointer and leadPointer. If leadPointer is not
6380 // beyond the next endWrite position, wait until it is.
6381 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6382 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6383 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6384 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6385 endWrite = nextWritePointer + bufferBytes;
6387 // Check whether the entire write region is behind the play pointer.
6388 if ( leadPointer >= endWrite ) break;
6390 // If we are here, then we must wait until the leadPointer advances
6391 // beyond the end of our next write region. We use the
6392 // Sleep() function to suspend operation until that happens.
6393 double millis = ( endWrite - leadPointer ) * 1000.0;
6394 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6395 if ( millis < 1.0 ) millis = 1.0;
6396 Sleep( (DWORD) millis );
// Underflow check: if our write region straddles the play/write cursors
// (the "forbidden zone"), resync and flag an output underflow.
6399 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6400 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6401 // We've strayed into the forbidden zone ... resync the read pointer.
6402 handle->xrun[0] = true;
6403 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6404 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6405 handle->bufferPointer[0] = nextWritePointer;
6406 endWrite = nextWritePointer + bufferBytes;
6409 // Lock free space in the buffer
6410 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6411 &bufferSize1, &buffer2, &bufferSize2, 0 );
6412 if ( FAILED( result ) ) {
6413 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6414 errorText_ = errorStream_.str();
6415 MUTEX_UNLOCK( &stream_.mutex );
6416 error( RtAudioError::SYSTEM_ERROR );
6420 // Copy our buffer into the DS buffer
// (two regions because the circular buffer lock may wrap).
6421 CopyMemory( buffer1, buffer, bufferSize1 );
6422 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6424 // Update our buffer offset and unlock sound buffer
6425 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6426 if ( FAILED( result ) ) {
6427 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6428 errorText_ = errorStream_.str();
6429 MUTEX_UNLOCK( &stream_.mutex );
6430 error( RtAudioError::SYSTEM_ERROR );
6433 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6434 handle->bufferPointer[0] = nextWritePointer;
6437 // Don't bother draining input
6438 if ( handle->drainCounter ) {
6439 handle->drainCounter++;
// ---------------- Capture path ----------------
6443 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6445 // Setup parameters.
6446 if ( stream_.doConvertBuffer[1] ) {
6447 buffer = stream_.deviceBuffer;
6448 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6449 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6452 buffer = stream_.userBuffer[1];
6453 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6454 bufferBytes *= formatBytes( stream_.userFormat );
6457 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6458 long nextReadPointer = handle->bufferPointer[1];
6459 DWORD dsBufferSize = handle->dsBufferSize[1];
6461 // Find out where the write and "safe read" pointers are.
// NOTE(review): "¤tReadPointer" is entity-corrupted "&currentReadPointer".
6462 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6463 if ( FAILED( result ) ) {
6464 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6465 errorText_ = errorStream_.str();
6466 MUTEX_UNLOCK( &stream_.mutex );
6467 error( RtAudioError::SYSTEM_ERROR );
6471 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6472 DWORD endRead = nextReadPointer + bufferBytes;
6474 // Handling depends on whether we are INPUT or DUPLEX.
6475 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6476 // then a wait here will drag the write pointers into the forbidden zone.
6478 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6479 // it's in a safe position. This causes dropouts, but it seems to be the only
6480 // practical way to sync up the read and write pointers reliably, given the
6481 // the very complex relationship between phase and increment of the read and write
// pointers (continuation comment; "pointers." line dropped from this view).
6484 // In order to minimize audible dropouts in DUPLEX mode, we will
6485 // provide a pre-roll period of 0.5 seconds in which we return
6486 // zeros from the read buffer while the pointers sync up.
6488 if ( stream_.mode == DUPLEX ) {
6489 if ( safeReadPointer < endRead ) {
6490 if ( duplexPrerollBytes <= 0 ) {
6491 // Pre-roll time over. Be more agressive.
6492 int adjustment = endRead-safeReadPointer;
6494 handle->xrun[1] = true;
6496 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6497 // and perform fine adjustments later.
6498 // - small adjustments: back off by twice as much.
6499 if ( adjustment >= 2*bufferBytes )
6500 nextReadPointer = safeReadPointer-2*bufferBytes;
6502 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6504 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6508 // In pre=roll time. Just do it.
6509 nextReadPointer = safeReadPointer - bufferBytes;
6510 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6512 endRead = nextReadPointer + bufferBytes;
6515 else { // mode == INPUT
// INPUT-only mode: safe to sleep until enough data has been captured.
6516 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6517 // See comments for playback.
6518 double millis = (endRead - safeReadPointer) * 1000.0;
6519 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6520 if ( millis < 1.0 ) millis = 1.0;
6521 Sleep( (DWORD) millis );
6523 // Wake up and find out where we are now.
// NOTE(review): "¤tReadPointer" is entity-corrupted "&currentReadPointer".
6524 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6525 if ( FAILED( result ) ) {
6526 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6527 errorText_ = errorStream_.str();
6528 MUTEX_UNLOCK( &stream_.mutex );
6529 error( RtAudioError::SYSTEM_ERROR );
6533 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6537 // Lock free space in the buffer
6538 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6539 &bufferSize1, &buffer2, &bufferSize2, 0 );
6540 if ( FAILED( result ) ) {
6541 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6542 errorText_ = errorStream_.str();
6543 MUTEX_UNLOCK( &stream_.mutex );
6544 error( RtAudioError::SYSTEM_ERROR );
// During pre-roll, deliver zeros instead of (not-yet-synced) captured data.
6548 if ( duplexPrerollBytes <= 0 ) {
6549 // Copy our buffer into the DS buffer
6550 CopyMemory( buffer, buffer1, bufferSize1 );
6551 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6554 memset( buffer, 0, bufferSize1 );
6555 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6556 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6559 // Update our buffer offset and unlock sound buffer
6560 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6561 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6562 if ( FAILED( result ) ) {
6563 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6564 errorText_ = errorStream_.str();
6565 MUTEX_UNLOCK( &stream_.mutex );
6566 error( RtAudioError::SYSTEM_ERROR );
6569 handle->bufferPointer[1] = nextReadPointer;
6571 // No byte swapping necessary in DirectSound implementation.
6573 // If necessary, convert 8-bit data from unsigned to signed.
6574 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6575 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6577 // Do buffer conversion if necessary.
6578 if ( stream_.doConvertBuffer[1] )
6579 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// Release the mutex and advance the stream clock by one buffer period.
6583 MUTEX_UNLOCK( &stream_.mutex );
6584 RtApi::tickStreamTime();
6587 // Definitions for utility functions and callbacks
6588 // specific to the DirectSound implementation.
// Entry point for the DS callback thread created with _beginthreadex in
// probeDeviceOpen: loops calling the object's callbackEvent() until
// stream_.callbackInfo.isRunning is cleared by closeStream().
// NOTE(review): trailing lines (loop close, _endthreadex/return) were dropped
// from this paste (embedded numbering jumps 6597->6604).
6590 static unsigned __stdcall callbackHandler( void *ptr )
6592 CallbackInfo *info = (CallbackInfo *) ptr;
6593 RtApiDs *object = (RtApiDs *) info->object;
// Observe the flag through a pointer so closeStream()'s write is seen here.
6594 bool* isRunning = &info->isRunning;
6596 while ( *isRunning == true ) {
6597 object->callbackEvent();
// DirectSoundEnumerate/DirectSoundCaptureEnumerate callback: validates each
// reported device by opening it and checking its capabilities, then records
// its name and GUID into the shared dsDevices vector (merging input/output
// ids for devices already seen). Returns TRUE to continue enumeration.
// NOTE(review): the parameter list is truncated by the paste (embedded
// numbering jumps 6605->6609); the module and lpContext parameters are not
// visible here.
6604 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6605 LPCTSTR description,
6609 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6610 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6613 bool validDevice = false;
// Capture-device probe: open, query caps, require channels and formats.
6614 if ( probeInfo.isInput == true ) {
6616 LPDIRECTSOUNDCAPTURE object;
6618 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6619 if ( hr != DS_OK ) return TRUE;
6621 caps.dwSize = sizeof(caps);
6622 hr = object->GetCaps( &caps );
6623 if ( hr == DS_OK ) {
6624 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback-device probe: open, query caps, require a mono or stereo primary.
6631 LPDIRECTSOUND object;
6632 hr = DirectSoundCreate( lpguid, &object, NULL );
6633 if ( hr != DS_OK ) return TRUE;
6635 caps.dwSize = sizeof(caps);
6636 hr = object->GetCaps( &caps );
6637 if ( hr == DS_OK ) {
6638 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6644 // If good device, then save its name and guid.
6645 std::string name = convertCharPointerToStdString( description );
6646 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL guid is how DirectSound reports the system default device.
6647 if ( lpguid == NULL )
6648 name = "Default Device";
6649 if ( validDevice ) {
// Merge with an existing entry of the same name (same physical device seen
// from the other enumeration direction): id[1] = capture, id[0] = playback.
6650 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6651 if ( dsDevices[i].name == name ) {
6652 dsDevices[i].found = true;
6653 if ( probeInfo.isInput ) {
6654 dsDevices[i].id[1] = lpguid;
6655 dsDevices[i].validId[1] = true;
6658 dsDevices[i].id[0] = lpguid;
6659 dsDevices[i].validId[0] = true;
// Otherwise append a brand-new device record.
6667 device.found = true;
6668 if ( probeInfo.isInput ) {
6669 device.id[1] = lpguid;
6670 device.validId[1] = true;
6673 device.id[0] = lpguid;
6674 device.validId[0] = true;
6676 dsDevices.push_back( device );
// Map a DirectSound HRESULT error code to a human-readable string for the
// errorStream_ messages built throughout the RtApiDs implementation.
// NOTE(review): the switch header, some break/case lines (e.g. the
// DSERR_GENERIC case label before "Generic error" and the DSERR_NODRIVER
// return value), and the closing braces were dropped from this paste.
6682 static const char* getErrorString( int code )
6686 case DSERR_ALLOCATED:
6687 return "Already allocated";
6689 case DSERR_CONTROLUNAVAIL:
6690 return "Control unavailable";
6692 case DSERR_INVALIDPARAM:
6693 return "Invalid parameter";
6695 case DSERR_INVALIDCALL:
6696 return "Invalid call";
// Presumably the DSERR_GENERIC case label belongs here — confirm upstream.
6699 return "Generic error";
6701 case DSERR_PRIOLEVELNEEDED:
6702 return "Priority level needed";
6704 case DSERR_OUTOFMEMORY:
6705 return "Out of memory";
6707 case DSERR_BADFORMAT:
6708 return "The sample rate or the channel format is not supported";
6710 case DSERR_UNSUPPORTED:
6711 return "Not supported";
6713 case DSERR_NODRIVER:
6716 case DSERR_ALREADYINITIALIZED:
6717 return "Already initialized";
6719 case DSERR_NOAGGREGATION:
6720 return "No aggregation";
6722 case DSERR_BUFFERLOST:
6723 return "Buffer lost";
6725 case DSERR_OTHERAPPHASPRIO:
6726 return "Another application already has priority";
6728 case DSERR_UNINITIALIZED:
6729 return "Uninitialized";
// Fallback for unrecognized codes (default label dropped from this view).
6732 return "DirectSound unknown error";
6735 //******************** End of __WINDOWS_DS__ *********************//
6739 #if defined(__LINUX_ALSA__)
6741 #include <alsa/asoundlib.h>
6744 // A structure to hold various information related to the ALSA API
// implementation. NOTE(review): the "struct AlsaHandle {" opener and several
// members (e.g. xrun flags, synchronized/runnable fields, the mutex paired
// with runnable_cv) were dropped from this paste — only fragments remain.
// handles[0] = playback PCM, handles[1] = capture PCM (matches the 0/1
// output/input indexing used elsewhere in this file).
6747 snd_pcm_t *handles[2];
// Condition variable presumably used to park the callback thread until the
// stream becomes runnable — confirm against the canonical source.
6750 pthread_cond_t runnable_cv;
// Constructor initializer-list fragment: default to not synchronized, not
// runnable, no xruns recorded.
6754 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Forward declaration of the ALSA callback thread entry point.
6757 static void *alsaCallbackHandler( void * ptr );
// Default constructor: no ALSA-specific initialization is required; state is
// set up lazily in probeDeviceOpen.
6759 RtApiAlsa :: RtApiAlsa()
6761 // Nothing to do here.
// Destructor: ensure any still-open stream is shut down and its resources
// released before the API object goes away.
6764 RtApiAlsa :: ~RtApiAlsa()
6766 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count ALSA PCM devices by walking every sound card ("hw:N") and its
// subdevices via the control interface, then add one more slot if the
// "default" device can be opened. Errors opening individual cards are
// reported as warnings and skipped, not fatal.
// NOTE(review): line-numbered paste with dropped lines (embedded numbering
// jumps); several braces, a goto/continue after the warnings, the inner
// subdevice loop header, the nDevices increments, and the final return are
// not visible here.
6769 unsigned int RtApiAlsa :: getDeviceCount( void )
6771 unsigned nDevices = 0;
6772 int result, subdevice, card;
6776 // Count cards and devices
// Iterate sound cards; snd_card_next(-1-start) yields the first card and
// sets card < 0 when exhausted.
6778 snd_card_next( &card );
6779 while ( card >= 0 ) {
6780 sprintf( name, "hw:%d", card );
6781 result = snd_ctl_open( &handle, name, 0 );
// Card could not be opened: warn and move on to the next card.
6783 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6784 errorText_ = errorStream_.str();
6785 error( RtAudioError::WARNING );
// Enumerate PCM subdevices on this card; subdevice becomes -1 when done.
6790 result = snd_ctl_pcm_next_device( handle, &subdevice );
6792 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6793 errorText_ = errorStream_.str();
6794 error( RtAudioError::WARNING );
6797 if ( subdevice < 0 )
6802 snd_ctl_close( handle );
6803 snd_card_next( &card );
// Probe the "default" virtual device; if it opens, it counts as a device too.
6806 result = snd_ctl_open( &handle, "default", 0 );
6809 snd_ctl_close( handle );
// Probe a single ALSA device (by the same enumeration order used in
// getDeviceCount) and fill an RtAudio::DeviceInfo structure with its
// channel counts, supported sample rates, preferred rate, native data
// formats, and name. Non-fatal probe failures are reported as
// RtAudioError::WARNING and leave info.probed == false.
//
// Parameters:
//   device - zero-based index into the card/subdevice enumeration.
// Returns: a DeviceInfo describing the device (possibly partially filled
//   when probing fails part-way through).
6815 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6817 RtAudio::DeviceInfo info;
6818 info.probed = false;
6820 unsigned nDevices = 0;
6821 int result, subdevice, card;
6825 // Count cards and devices
// Walk cards/subdevices until the requested index is reached; "name" ends
// up holding the matching "hw:card,subdevice" identifier.
6828 snd_card_next( &card );
6829 while ( card >= 0 ) {
6830 sprintf( name, "hw:%d", card );
6831 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6833 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6834 errorText_ = errorStream_.str();
6835 error( RtAudioError::WARNING );
6840 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6842 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6843 errorText_ = errorStream_.str();
6844 error( RtAudioError::WARNING );
6847 if ( subdevice < 0 ) break;
6848 if ( nDevices == device ) {
6849 sprintf( name, "hw:%d,%d", card, subdevice );
6855 snd_ctl_close( chandle );
6856 snd_card_next( &card );
// The "default" virtual device is enumerated last.
6859 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6860 if ( result == 0 ) {
6861 if ( nDevices == device ) {
6862 strcpy( name, "default" );
6868 if ( nDevices == 0 ) {
6869 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6870 error( RtAudioError::INVALID_USE );
6874 if ( device >= nDevices ) {
6875 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6876 error( RtAudioError::INVALID_USE );
6882 // If a stream is already open, we cannot probe the stream devices.
6883 // Thus, use the saved results.
6884 if ( stream_.state != STREAM_CLOSED &&
6885 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6886 snd_ctl_close( chandle );
6887 if ( device >= devices_.size() ) {
6888 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6889 error( RtAudioError::WARNING );
6892 return devices_[ device ];
6895 int openMode = SND_PCM_ASYNC;
6896 snd_pcm_stream_t stream;
6897 snd_pcm_info_t *pcminfo;
6898 snd_pcm_info_alloca( &pcminfo );
6900 snd_pcm_hw_params_t *params;
// FIX: the previous revision contained a mis-encoded "¶ms" (HTML-entity
// corruption of "&params"), which cannot compile.
6901 snd_pcm_hw_params_alloca( &params );
6903 // First try for playback unless default device (which has subdev -1)
6904 stream = SND_PCM_STREAM_PLAYBACK;
6905 snd_pcm_info_set_stream( pcminfo, stream );
6906 if ( subdevice != -1 ) {
6907 snd_pcm_info_set_device( pcminfo, subdevice );
6908 snd_pcm_info_set_subdevice( pcminfo, 0 );
6910 result = snd_ctl_pcm_info( chandle, pcminfo );
6912 // Device probably doesn't support playback.
// Open non-blocking so a busy device doesn't hang the probe.
6917 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6919 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6920 errorText_ = errorStream_.str();
6921 error( RtAudioError::WARNING );
6925 // The device is open ... fill the parameter structure.
6926 result = snd_pcm_hw_params_any( phandle, params );
6928 snd_pcm_close( phandle );
6929 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6930 errorText_ = errorStream_.str();
6931 error( RtAudioError::WARNING );
6935 // Get output channel information.
6937 result = snd_pcm_hw_params_get_channels_max( params, &value );
6939 snd_pcm_close( phandle );
6940 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
6941 errorText_ = errorStream_.str();
6942 error( RtAudioError::WARNING );
6945 info.outputChannels = value;
6946 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
6949 stream = SND_PCM_STREAM_CAPTURE;
6950 snd_pcm_info_set_stream( pcminfo, stream );
6952 // Now try for capture unless default device (with subdev = -1)
6953 if ( subdevice != -1 ) {
6954 result = snd_ctl_pcm_info( chandle, pcminfo );
6955 snd_ctl_close( chandle );
6957 // Device probably doesn't support capture.
6958 if ( info.outputChannels == 0 ) return info;
6959 goto probeParameters;
6963 snd_ctl_close( chandle );
6965 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
6967 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6968 errorText_ = errorStream_.str();
6969 error( RtAudioError::WARNING );
6970 if ( info.outputChannels == 0 ) return info;
6971 goto probeParameters;
6974 // The device is open ... fill the parameter structure.
6975 result = snd_pcm_hw_params_any( phandle, params );
6977 snd_pcm_close( phandle );
6978 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6979 errorText_ = errorStream_.str();
6980 error( RtAudioError::WARNING );
6981 if ( info.outputChannels == 0 ) return info;
6982 goto probeParameters;
6985 result = snd_pcm_hw_params_get_channels_max( params, &value );
6987 snd_pcm_close( phandle );
6988 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
6989 errorText_ = errorStream_.str();
6990 error( RtAudioError::WARNING );
6991 if ( info.outputChannels == 0 ) return info;
6992 goto probeParameters;
6994 info.inputChannels = value;
6995 snd_pcm_close( phandle );
6997 // If device opens for both playback and capture, we determine the channels.
6998 if ( info.outputChannels > 0 && info.inputChannels > 0 )
6999 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7001 // ALSA doesn't provide default devices so we'll use the first available one.
7002 if ( device == 0 && info.outputChannels > 0 )
7003 info.isDefaultOutput = true;
7004 if ( device == 0 && info.inputChannels > 0 )
7005 info.isDefaultInput = true;
7008 // At this point, we just need to figure out the supported data
7009 // formats and sample rates. We'll proceed by opening the device in
7010 // the direction with the maximum number of channels, or playback if
7011 // they are equal. This might limit our sample rate options, but so
7014 if ( info.outputChannels >= info.inputChannels )
7015 stream = SND_PCM_STREAM_PLAYBACK;
7017 stream = SND_PCM_STREAM_CAPTURE;
7018 snd_pcm_info_set_stream( pcminfo, stream );
7020 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7022 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7023 errorText_ = errorStream_.str();
7024 error( RtAudioError::WARNING );
7028 // The device is open ... fill the parameter structure.
7029 result = snd_pcm_hw_params_any( phandle, params );
7031 snd_pcm_close( phandle );
7032 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7033 errorText_ = errorStream_.str();
7034 error( RtAudioError::WARNING );
7038 // Test our discrete set of sample rate values.
7039 info.sampleRates.clear();
7040 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7041 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7042 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
7044 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7045 info.preferredSampleRate = SAMPLE_RATES[i];
7048 if ( info.sampleRates.size() == 0 ) {
7049 snd_pcm_close( phandle );
7050 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7051 errorText_ = errorStream_.str();
7052 error( RtAudioError::WARNING );
7056 // Probe the supported data formats ... we don't care about endian-ness just yet
7057 snd_pcm_format_t format;
7058 info.nativeFormats = 0;
7059 format = SND_PCM_FORMAT_S8;
7060 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7061 info.nativeFormats |= RTAUDIO_SINT8;
7062 format = SND_PCM_FORMAT_S16;
7063 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7064 info.nativeFormats |= RTAUDIO_SINT16;
7065 format = SND_PCM_FORMAT_S24;
7066 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7067 info.nativeFormats |= RTAUDIO_SINT24;
7068 format = SND_PCM_FORMAT_S32;
7069 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7070 info.nativeFormats |= RTAUDIO_SINT32;
7071 format = SND_PCM_FORMAT_FLOAT;
7072 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7073 info.nativeFormats |= RTAUDIO_FLOAT32;
7074 format = SND_PCM_FORMAT_FLOAT64;
7075 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7076 info.nativeFormats |= RTAUDIO_FLOAT64;
7078 // Check that we have at least one supported format
7079 if ( info.nativeFormats == 0 ) {
7080 snd_pcm_close( phandle );
7081 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7082 errorText_ = errorStream_.str();
7083 error( RtAudioError::WARNING );
7087 // Get the device name
7089 result = snd_card_get_name( card, &cardname );
7090 if ( result >= 0 ) {
7091 sprintf( name, "hw:%s,%d", cardname, subdevice );
7096 // That's all ... close the device and return
7097 snd_pcm_close( phandle );
// Snapshot the current device list into devices_ so that getDeviceInfo()
// can answer from the cache while a stream is open (probing an open device
// directly is not possible; see getDeviceInfo()).
7102 void RtApiAlsa :: saveDeviceInfo( void )
7106 unsigned int nDevices = getDeviceCount();
7107 devices_.resize( nDevices );
7108 for ( unsigned int i=0; i<nDevices; i++ )
7109 devices_[i] = getDeviceInfo( i );
// Open and configure one direction (OUTPUT or INPUT) of an ALSA stream:
// locate the device, negotiate access mode, data format, sample rate,
// channel count, period size and count, install hw/sw params, allocate the
// user/device conversion buffers, and spawn the callback thread (with
// optional SCHED_RR realtime priority). On the second call of a duplex
// pair the two PCM handles are linked when possible.
//
// Parameters:
//   device       - zero-based device index (same enumeration as getDeviceCount).
//   mode         - OUTPUT or INPUT direction being opened.
//   channels     - number of channels requested by the user.
//   firstChannel - channel offset into the device's channel range.
//   sampleRate   - requested rate; adjusted to nearest supported value.
//   format       - requested RtAudioFormat; a device-native fallback is
//                  chosen if unsupported.
//   bufferSize   - in/out: requested period size in frames; updated to the
//                  value actually granted by the device.
//   options      - optional stream flags (non-interleaved, default device,
//                  minimize latency, realtime scheduling, buffer count).
// Returns: true on success; on failure sets errorText_ and returns via the
//   (elided) error path, releasing any partially-acquired resources.
7112 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7113 unsigned int firstChannel, unsigned int sampleRate,
7114 RtAudioFormat format, unsigned int *bufferSize,
7115 RtAudio::StreamOptions *options )
7118 #if defined(__RTAUDIO_DEBUG__)
7120 snd_output_stdio_attach(&out, stderr, 0);
7123 // I'm not using the "plug" interface ... too much inconsistent behavior.
7125 unsigned nDevices = 0;
7126 int result, subdevice, card;
// If requested, bypass enumeration and use the "default" virtual device.
7130 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7131 snprintf(name, sizeof(name), "%s", "default");
7133 // Count cards and devices
7135 snd_card_next( &card );
7136 while ( card >= 0 ) {
7137 sprintf( name, "hw:%d", card );
7138 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7140 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7141 errorText_ = errorStream_.str();
7146 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7147 if ( result < 0 ) break;
7148 if ( subdevice < 0 ) break;
7149 if ( nDevices == device ) {
7150 sprintf( name, "hw:%d,%d", card, subdevice );
7151 snd_ctl_close( chandle );
7156 snd_ctl_close( chandle );
7157 snd_card_next( &card );
7160 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7161 if ( result == 0 ) {
7162 if ( nDevices == device ) {
7163 strcpy( name, "default" );
7169 if ( nDevices == 0 ) {
7170 // This should not happen because a check is made before this function is called.
7171 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7175 if ( device >= nDevices ) {
7176 // This should not happen because a check is made before this function is called.
7177 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7184 // The getDeviceInfo() function will not work for a device that is
7185 // already open. Thus, we'll probe the system before opening a
7186 // stream and save the results for use by getDeviceInfo().
7187 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7188 this->saveDeviceInfo();
7190 snd_pcm_stream_t stream;
7191 if ( mode == OUTPUT )
7192 stream = SND_PCM_STREAM_PLAYBACK;
7194 stream = SND_PCM_STREAM_CAPTURE;
7197 int openMode = SND_PCM_ASYNC;
7198 result = snd_pcm_open( &phandle, name, stream, openMode );
7200 if ( mode == OUTPUT )
7201 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7203 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7204 errorText_ = errorStream_.str();
7208 // Fill the parameter structure.
7209 snd_pcm_hw_params_t *hw_params;
7210 snd_pcm_hw_params_alloca( &hw_params );
7211 result = snd_pcm_hw_params_any( phandle, hw_params );
7213 snd_pcm_close( phandle );
7214 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7215 errorText_ = errorStream_.str();
7219 #if defined(__RTAUDIO_DEBUG__)
7220 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7221 snd_pcm_hw_params_dump( hw_params, out );
7224 // Set access ... check user preference.
// Try the user's preferred interleaving first; fall back to the other mode
// and record what the device actually accepted in deviceInterleaved[].
7225 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7226 stream_.userInterleaved = false;
7227 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7229 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7230 stream_.deviceInterleaved[mode] = true;
7233 stream_.deviceInterleaved[mode] = false;
7236 stream_.userInterleaved = true;
7237 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7239 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7240 stream_.deviceInterleaved[mode] = false;
7243 stream_.deviceInterleaved[mode] = true;
7247 snd_pcm_close( phandle );
7248 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7249 errorText_ = errorStream_.str();
7253 // Determine how to set the device format.
7254 stream_.userFormat = format;
7255 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7257 if ( format == RTAUDIO_SINT8 )
7258 deviceFormat = SND_PCM_FORMAT_S8;
7259 else if ( format == RTAUDIO_SINT16 )
7260 deviceFormat = SND_PCM_FORMAT_S16;
7261 else if ( format == RTAUDIO_SINT24 )
7262 deviceFormat = SND_PCM_FORMAT_S24;
7263 else if ( format == RTAUDIO_SINT32 )
7264 deviceFormat = SND_PCM_FORMAT_S32;
7265 else if ( format == RTAUDIO_FLOAT32 )
7266 deviceFormat = SND_PCM_FORMAT_FLOAT;
7267 else if ( format == RTAUDIO_FLOAT64 )
7268 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7270 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7271 stream_.deviceFormat[mode] = format;
7275 // The user requested format is not natively supported by the device.
// Fall back through device formats from widest to narrowest; conversion to
// the user format is handled later via the doConvertBuffer flags.
7276 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7277 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7278 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7282 deviceFormat = SND_PCM_FORMAT_FLOAT;
7283 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7284 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7288 deviceFormat = SND_PCM_FORMAT_S32;
7289 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7290 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7294 deviceFormat = SND_PCM_FORMAT_S24;
7295 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7296 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7300 deviceFormat = SND_PCM_FORMAT_S16;
7301 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7302 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7306 deviceFormat = SND_PCM_FORMAT_S8;
7307 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7308 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7312 // If we get here, no supported format was found.
7313 snd_pcm_close( phandle );
7314 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7315 errorText_ = errorStream_.str();
7319 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7321 snd_pcm_close( phandle );
7322 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7323 errorText_ = errorStream_.str();
7327 // Determine whether byte-swapping is necessary.
7328 stream_.doByteSwap[mode] = false;
7329 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7330 result = snd_pcm_format_cpu_endian( deviceFormat );
7332 stream_.doByteSwap[mode] = true;
7333 else if (result < 0) {
7334 snd_pcm_close( phandle );
7335 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7336 errorText_ = errorStream_.str();
7341 // Set the sample rate.
7342 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7344 snd_pcm_close( phandle );
7345 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7346 errorText_ = errorStream_.str();
7350 // Determine the number of channels for this device. We support a possible
7351 // minimum device channel number > than the value requested by the user.
7352 stream_.nUserChannels[mode] = channels;
7354 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7355 unsigned int deviceChannels = value;
7356 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7357 snd_pcm_close( phandle );
7358 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7359 errorText_ = errorStream_.str();
7363 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7365 snd_pcm_close( phandle );
7366 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7367 errorText_ = errorStream_.str();
7370 deviceChannels = value;
7371 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7372 stream_.nDeviceChannels[mode] = deviceChannels;
7374 // Set the device channels.
7375 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7377 snd_pcm_close( phandle );
7378 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7379 errorText_ = errorStream_.str();
7383 // Set the buffer (or period) size.
7385 snd_pcm_uframes_t periodSize = *bufferSize;
7386 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7388 snd_pcm_close( phandle );
7389 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7390 errorText_ = errorStream_.str();
// Report back the period size the device actually granted.
7393 *bufferSize = periodSize;
7395 // Set the buffer number, which in ALSA is referred to as the "period".
7396 unsigned int periods = 0;
7397 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7398 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7399 if ( periods < 2 ) periods = 4; // a fairly safe default value
7400 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7402 snd_pcm_close( phandle );
7403 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7404 errorText_ = errorStream_.str();
7408 // If attempting to setup a duplex stream, the bufferSize parameter
7409 // MUST be the same in both directions!
7410 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7411 snd_pcm_close( phandle );
7412 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7413 errorText_ = errorStream_.str();
7417 stream_.bufferSize = *bufferSize;
7419 // Install the hardware configuration
7420 result = snd_pcm_hw_params( phandle, hw_params );
7422 snd_pcm_close( phandle );
7423 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7424 errorText_ = errorStream_.str();
7428 #if defined(__RTAUDIO_DEBUG__)
7429 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7430 snd_pcm_hw_params_dump( hw_params, out );
7433 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7434 snd_pcm_sw_params_t *sw_params = NULL;
7435 snd_pcm_sw_params_alloca( &sw_params );
7436 snd_pcm_sw_params_current( phandle, sw_params );
7437 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7438 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7439 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7441 // The following two settings were suggested by Theo Veenker
7442 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7443 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7445 // here are two options for a fix
7446 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7447 snd_pcm_uframes_t val;
7448 snd_pcm_sw_params_get_boundary( sw_params, &val );
7449 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7451 result = snd_pcm_sw_params( phandle, sw_params );
7453 snd_pcm_close( phandle );
7454 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7455 errorText_ = errorStream_.str();
7459 #if defined(__RTAUDIO_DEBUG__)
7460 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7461 snd_pcm_sw_params_dump( sw_params, out );
7464 // Set flags for buffer conversion
// Conversion is required when format, channel count, or interleaving
// differs between what the user asked for and what the device granted.
7465 stream_.doConvertBuffer[mode] = false;
7466 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7467 stream_.doConvertBuffer[mode] = true;
7468 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7469 stream_.doConvertBuffer[mode] = true;
7470 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7471 stream_.nUserChannels[mode] > 1 )
7472 stream_.doConvertBuffer[mode] = true;
7474 // Allocate the ApiHandle if necessary and then save.
7475 AlsaHandle *apiInfo = 0;
7476 if ( stream_.apiHandle == 0 ) {
7478 apiInfo = (AlsaHandle *) new AlsaHandle;
7480 catch ( std::bad_alloc& ) {
7481 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7485 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7486 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7490 stream_.apiHandle = (void *) apiInfo;
7491 apiInfo->handles[0] = 0;
7492 apiInfo->handles[1] = 0;
7495 apiInfo = (AlsaHandle *) stream_.apiHandle;
7497 apiInfo->handles[mode] = phandle;
7500 // Allocate necessary internal buffers.
7501 unsigned long bufferBytes;
7502 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7503 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7504 if ( stream_.userBuffer[mode] == NULL ) {
7505 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7509 if ( stream_.doConvertBuffer[mode] ) {
7511 bool makeBuffer = true;
7512 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7513 if ( mode == INPUT ) {
// Reuse the output-side device buffer for input when it is big enough.
7514 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7515 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7516 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7521 bufferBytes *= *bufferSize;
7522 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7523 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7524 if ( stream_.deviceBuffer == NULL ) {
7525 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7531 stream_.sampleRate = sampleRate;
7532 stream_.nBuffers = periods;
7533 stream_.device[mode] = device;
7534 stream_.state = STREAM_STOPPED;
7536 // Setup the buffer conversion information structure.
7537 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7539 // Setup thread if necessary.
7540 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7541 // We had already set up an output stream.
7542 stream_.mode = DUPLEX;
7543 // Link the streams if possible.
7544 apiInfo->synchronized = false;
7545 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7546 apiInfo->synchronized = true;
7548 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7549 error( RtAudioError::WARNING );
7553 stream_.mode = mode;
7555 // Setup callback thread.
7556 stream_.callbackInfo.object = (void *) this;
7558 // Set the thread attributes for joinable and realtime scheduling
7559 // priority (optional). The higher priority will only take effect
7560 // if the program is run as root or suid. Note, under Linux
7561 // processes with CAP_SYS_NICE privilege, a user can change
7562 // scheduling policy and priority (thus need not be root). See
7563 // POSIX "capabilities".
7564 pthread_attr_t attr;
7565 pthread_attr_init( &attr );
7566 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7567 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7568 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7569 stream_.callbackInfo.doRealtime = true;
7570 struct sched_param param;
7571 int priority = options->priority;
7572 int min = sched_get_priority_min( SCHED_RR );
7573 int max = sched_get_priority_max( SCHED_RR );
// Clamp the requested priority into the valid SCHED_RR range.
7574 if ( priority < min ) priority = min;
7575 else if ( priority > max ) priority = max;
7576 param.sched_priority = priority;
7578 // Set the policy BEFORE the priority. Otherwise it fails.
7579 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7580 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7581 // This is definitely required. Otherwise it fails.
7582 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// FIX: the previous revision contained a mis-encoded "¶m" (HTML-entity
// corruption of "&param"), which cannot compile.
7583 pthread_attr_setschedparam(&attr, &param);
7586 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7588 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7591 stream_.callbackInfo.isRunning = true;
7592 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7593 pthread_attr_destroy( &attr );
7595 // Failed. Try instead with default attributes.
7596 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7598 stream_.callbackInfo.isRunning = false;
7599 errorText_ = "RtApiAlsa::error creating callback thread!";
// Error/cleanup path: release everything acquired above.
7609 pthread_cond_destroy( &apiInfo->runnable_cv );
7610 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7611 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7613 stream_.apiHandle = 0;
7616 if ( phandle) snd_pcm_close( phandle );
7618 for ( int i=0; i<2; i++ ) {
7619 if ( stream_.userBuffer[i] ) {
7620 free( stream_.userBuffer[i] );
7621 stream_.userBuffer[i] = 0;
7625 if ( stream_.deviceBuffer ) {
7626 free( stream_.deviceBuffer );
7627 stream_.deviceBuffer = 0;
7630 stream_.state = STREAM_CLOSED;
// Close an open stream: wake and join the callback thread, drop any
// running PCMs, destroy the condition variable, close the PCM handles,
// free the AlsaHandle and the user/device buffers, and reset the stream
// state to UNINITIALIZED/STREAM_CLOSED.
7634 void RtApiAlsa :: closeStream()
7636 if ( stream_.state == STREAM_CLOSED ) {
7637 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7638 error( RtAudioError::WARNING );
7642 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback thread to exit, then wake it if it is parked waiting
// on runnable_cv (the stopped-stream case), and join it.
7643 stream_.callbackInfo.isRunning = false;
7644 MUTEX_LOCK( &stream_.mutex );
7645 if ( stream_.state == STREAM_STOPPED ) {
7646 apiInfo->runnable = true;
7647 pthread_cond_signal( &apiInfo->runnable_cv );
7649 MUTEX_UNLOCK( &stream_.mutex );
7650 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, drop (discard pending frames) on both directions.
7652 if ( stream_.state == STREAM_RUNNING ) {
7653 stream_.state = STREAM_STOPPED;
7654 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7655 snd_pcm_drop( apiInfo->handles[0] );
7656 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7657 snd_pcm_drop( apiInfo->handles[1] );
// Release API-level resources.
7661 pthread_cond_destroy( &apiInfo->runnable_cv );
7662 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7663 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7665 stream_.apiHandle = 0;
// Release the internal conversion buffers.
7668 for ( int i=0; i<2; i++ ) {
7669 if ( stream_.userBuffer[i] ) {
7670 free( stream_.userBuffer[i] );
7671 stream_.userBuffer[i] = 0;
7675 if ( stream_.deviceBuffer ) {
7676 free( stream_.deviceBuffer );
7677 stream_.deviceBuffer = 0;
7680 stream_.mode = UNINITIALIZED;
7681 stream_.state = STREAM_CLOSED;
// Start a stopped stream: prepare the playback and/or capture PCMs if
// needed, mark the stream RUNNING, and signal the parked callback thread.
7684 void RtApiAlsa :: startStream()
7686 // This method calls snd_pcm_prepare if the device isn't already in that state.
7689 if ( stream_.state == STREAM_RUNNING ) {
7690 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7691 error( RtAudioError::WARNING );
7695 MUTEX_LOCK( &stream_.mutex );
7698 snd_pcm_state_t state;
7699 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7700 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7701 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7702 state = snd_pcm_state( handle[0] );
7703 if ( state != SND_PCM_STATE_PREPARED ) {
7704 result = snd_pcm_prepare( handle[0] );
7706 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7707 errorText_ = errorStream_.str();
// For an unlinked capture PCM, prepare it separately; when linked
// (synchronized), preparing the output side covers both.
7713 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7714 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7715 state = snd_pcm_state( handle[1] );
7716 if ( state != SND_PCM_STATE_PREPARED ) {
7717 result = snd_pcm_prepare( handle[1] );
7719 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7720 errorText_ = errorStream_.str();
7726 stream_.state = STREAM_RUNNING;
// Wake the callback thread waiting on runnable_cv.
7729 apiInfo->runnable = true;
7730 pthread_cond_signal( &apiInfo->runnable_cv );
7731 MUTEX_UNLOCK( &stream_.mutex );
7733 if ( result >= 0 ) return;
7734 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream gracefully: drain (or drop, when linked) the
// output PCM so queued audio finishes playing, drop the capture PCM, and
// park the callback thread.
7737 void RtApiAlsa :: stopStream()
7740 if ( stream_.state == STREAM_STOPPED ) {
7741 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7742 error( RtAudioError::WARNING );
7746 stream_.state = STREAM_STOPPED;
7747 MUTEX_LOCK( &stream_.mutex );
7750 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7751 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7752 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked PCMs are dropped together; otherwise drain lets pending
// output frames play out before stopping.
7753 if ( apiInfo->synchronized )
7754 result = snd_pcm_drop( handle[0] );
7756 result = snd_pcm_drain( handle[0] );
7758 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7759 errorText_ = errorStream_.str();
7764 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7765 result = snd_pcm_drop( handle[1] );
7767 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7768 errorText_ = errorStream_.str();
7774 apiInfo->runnable = false; // fixes high CPU usage when stopped
7775 MUTEX_UNLOCK( &stream_.mutex );
7777 if ( result >= 0 ) return;
7778 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream immediately: unlike stopStream(), always drop
// (discard queued frames) on both directions rather than draining output.
7781 void RtApiAlsa :: abortStream()
7784 if ( stream_.state == STREAM_STOPPED ) {
7785 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7786 error( RtAudioError::WARNING );
7790 stream_.state = STREAM_STOPPED;
7791 MUTEX_LOCK( &stream_.mutex );
7794 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7795 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7796 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7797 result = snd_pcm_drop( handle[0] );
7799 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7800 errorText_ = errorStream_.str();
// Linked (synchronized) PCMs stop together, so only drop the capture
// side separately when unlinked.
7805 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7806 result = snd_pcm_drop( handle[1] );
7808 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7809 errorText_ = errorStream_.str();
7815 apiInfo->runnable = false; // fixes high CPU usage when stopped
7816 MUTEX_UNLOCK( &stream_.mutex );
7818 if ( result >= 0 ) return;
7819 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA audio cycle, driven by alsaCallbackHandler():
// wait while stopped, invoke the user callback, then read capture frames
// and/or write playback frames, handling xruns (-EPIPE) by re-preparing
// the device. Runs on the dedicated callback thread.
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
7822 void RtApiAlsa :: callbackEvent()
7824 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Park on the condition variable while the stream is stopped.
7825 if ( stream_.state == STREAM_STOPPED ) {
7826 MUTEX_LOCK( &stream_.mutex );
7827 while ( !apiInfo->runnable )
7828 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7830 if ( stream_.state != STREAM_RUNNING ) {
7831 MUTEX_UNLOCK( &stream_.mutex );
7834 MUTEX_UNLOCK( &stream_.mutex );
7837 if ( stream_.state == STREAM_CLOSED ) {
7838 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7839 error( RtAudioError::WARNING );
// Report any xrun flagged since the last cycle to the user callback,
// then clear the flag.
7843 int doStopStream = 0;
7844 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7845 double streamTime = getStreamTime();
7846 RtAudioStreamStatus status = 0;
7847 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7848 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7849 apiInfo->xrun[0] = false;
7851 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7852 status |= RTAUDIO_INPUT_OVERFLOW;
7853 apiInfo->xrun[1] = false;
// Callback return value: 1 = stop (drain), 2 = abort immediately.
7855 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7856 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
7858 if ( doStopStream == 2 ) {
7863 MUTEX_LOCK( &stream_.mutex );
7865 // The state might change while waiting on a mutex.
7866 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7872 snd_pcm_sframes_t frames;
7873 RtAudioFormat format;
7874 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side (handle[1]) ----
7876 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7878 // Setup parameters.
7879 if ( stream_.doConvertBuffer[1] ) {
7880 buffer = stream_.deviceBuffer;
7881 channels = stream_.nDeviceChannels[1];
7882 format = stream_.deviceFormat[1];
7885 buffer = stream_.userBuffer[1];
7886 channels = stream_.nUserChannels[1];
7887 format = stream_.userFormat;
7890 // Read samples from device in interleaved/non-interleaved format.
7891 if ( stream_.deviceInterleaved[1] )
7892 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the buffer.
7894 void *bufs[channels];
7895 size_t offset = stream_.bufferSize * formatBytes( format );
7896 for ( int i=0; i<channels; i++ )
7897 bufs[i] = (void *) (buffer + (i * offset));
7898 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
7901 if ( result < (int) stream_.bufferSize ) {
7902 // Either an error or overrun occurred.
7903 if ( result == -EPIPE ) {
7904 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7905 if ( state == SND_PCM_STATE_XRUN ) {
// Overrun: remember it for the next callback and recover the device.
7906 apiInfo->xrun[1] = true;
7907 result = snd_pcm_prepare( handle[1] );
7909 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7910 errorText_ = errorStream_.str();
7914 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7915 errorText_ = errorStream_.str();
7919 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7920 errorText_ = errorStream_.str();
7922 error( RtAudioError::WARNING );
7926 // Do byte swapping if necessary.
7927 if ( stream_.doByteSwap[1] )
7928 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
7930 // Do buffer conversion if necessary.
7931 if ( stream_.doConvertBuffer[1] )
7932 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7934 // Check stream latency
7935 result = snd_pcm_delay( handle[1], &frames );
7936 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side (handle[0]) ----
7941 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7943 // Setup parameters and do buffer conversion if necessary.
7944 if ( stream_.doConvertBuffer[0] ) {
7945 buffer = stream_.deviceBuffer;
7946 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
7947 channels = stream_.nDeviceChannels[0];
7948 format = stream_.deviceFormat[0];
7951 buffer = stream_.userBuffer[0];
7952 channels = stream_.nUserChannels[0];
7953 format = stream_.userFormat;
7956 // Do byte swapping if necessary.
7957 if ( stream_.doByteSwap[0] )
7958 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
7960 // Write samples to device in interleaved/non-interleaved format.
7961 if ( stream_.deviceInterleaved[0] )
7962 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
7964 void *bufs[channels];
7965 size_t offset = stream_.bufferSize * formatBytes( format );
7966 for ( int i=0; i<channels; i++ )
7967 bufs[i] = (void *) (buffer + (i * offset));
7968 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
7971 if ( result < (int) stream_.bufferSize ) {
7972 // Either an error or underrun occurred.
7973 if ( result == -EPIPE ) {
7974 snd_pcm_state_t state = snd_pcm_state( handle[0] );
7975 if ( state == SND_PCM_STATE_XRUN ) {
// Underrun: remember it for the next callback and recover the device.
7976 apiInfo->xrun[0] = true;
7977 result = snd_pcm_prepare( handle[0] );
7979 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
7980 errorText_ = errorStream_.str();
7983 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
7986 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7987 errorText_ = errorStream_.str();
7991 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
7992 errorText_ = errorStream_.str();
7994 error( RtAudioError::WARNING );
7998 // Check stream latency
7999 result = snd_pcm_delay( handle[0], &frames );
8000 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8004 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream-time clock; honor a deferred stop request (1).
8006 RtApi::tickStreamTime();
8007 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread. Reports (to stderr)
// whether SCHED_RR realtime scheduling actually took effect, then pumps
// callbackEvent() until CallbackInfo::isRunning is cleared.
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
8010 static void *alsaCallbackHandler( void *ptr )
8012 CallbackInfo *info = (CallbackInfo *) ptr;
8013 RtApiAlsa *object = (RtApiAlsa *) info->object;
8014 bool *isRunning = &info->isRunning;
8016 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8017 if ( info->doRealtime ) {
8018 std::cerr << "RtAudio alsa: " <<
8019 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8020 "running realtime scheduling" << std::endl;
8024 while ( *isRunning == true ) {
8025 pthread_testcancel(); // cancellation point so the stream can be torn down
8026 object->callbackEvent();
8029 pthread_exit( NULL );
8032 //******************** End of __LINUX_ALSA__ *********************//
8035 #if defined(__LINUX_PULSE__)
8037 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8038 // and Tristan Matthews.
8040 #include <pulse/error.h>
8041 #include <pulse/simple.h>
// Sample rates advertised by the PulseAudio backend; zero-terminated.
8044 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8045 44100, 48000, 96000, 0};
// One entry mapping an RtAudio sample format to its PulseAudio equivalent.
8047 struct rtaudio_pa_format_mapping_t {
8048 RtAudioFormat rtaudio_format;
8049 pa_sample_format_t pa_format;
// Format translation table; terminated by the {0, PA_SAMPLE_INVALID} sentinel.
8052 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8053 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8054 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8055 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8056 {0, PA_SAMPLE_INVALID}};
// Per-stream PulseAudio state: playback/record connections and the
// condition variable used to park the callback thread while stopped.
// NOTE(review): several members (s_play, s_rec, thread, runnable) are
// elided from this listing; the constructor initializer list implies them.
8058 struct PulseAudioHandle {
8062 pthread_cond_t runnable_cv;
8064 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: close the stream if the caller has not already done so.
8067 RtApiPulse::~RtApiPulse()
8069 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend.
// NOTE(review): the body is elided from this listing; presumably it
// returns a fixed count (the server is exposed as a single device) —
// confirm against the full source.
8073 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the (single) PulseAudio device: stereo in/out, default for
// both directions, the fixed SUPPORTED_SAMPLERATES list, and the three
// natively supported sample formats. The device index is ignored.
8078 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8080 RtAudio::DeviceInfo info;
8082 info.name = "PulseAudio";
8083 info.outputChannels = 2;
8084 info.inputChannels = 2;
8085 info.duplexChannels = 2;
8086 info.isDefaultOutput = true;
8087 info.isDefaultInput = true;
8089 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8090 info.sampleRates.push_back( *sr );
8092 info.preferredSampleRate = 48000;
8093 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread. Mirrors
// alsaCallbackHandler(): report realtime-scheduling status, then pump
// callbackEvent() until CallbackInfo::isRunning is cleared.
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
8098 static void *pulseaudio_callback( void * user )
8100 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8101 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8102 volatile bool *isRunning = &cbi->isRunning;
8104 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8105 if (cbi->doRealtime) {
8106 std::cerr << "RtAudio pulse: " <<
8107 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8108 "running realtime scheduling" << std::endl;
8112 while ( *isRunning ) {
8113 pthread_testcancel(); // cancellation point for stream teardown
8114 context->callbackEvent();
8117 pthread_exit( NULL );
// Close the PulseAudio stream: wake and join the callback thread, flush
// and free the playback/record connections, destroy the condition
// variable, release user buffers, and reset the stream state.
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
8120 void RtApiPulse::closeStream( void )
8122 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8124 stream_.callbackInfo.isRunning = false;
// If the callback thread is parked on the condvar, wake it so it can
// observe isRunning == false and exit.
8126 MUTEX_LOCK( &stream_.mutex );
8127 if ( stream_.state == STREAM_STOPPED ) {
8128 pah->runnable = true;
8129 pthread_cond_signal( &pah->runnable_cv );
8131 MUTEX_UNLOCK( &stream_.mutex );
8133 pthread_join( pah->thread, 0 );
8134 if ( pah->s_play ) {
8135 pa_simple_flush( pah->s_play, NULL );
8136 pa_simple_free( pah->s_play );
8139 pa_simple_free( pah->s_rec );
8141 pthread_cond_destroy( &pah->runnable_cv );
8143 stream_.apiHandle = 0;
8146 if ( stream_.userBuffer[0] ) {
8147 free( stream_.userBuffer[0] );
8148 stream_.userBuffer[0] = 0;
8150 if ( stream_.userBuffer[1] ) {
8151 free( stream_.userBuffer[1] );
8152 stream_.userBuffer[1] = 0;
8155 stream_.state = STREAM_CLOSED;
8156 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio audio cycle, driven by
// pulseaudio_callback(): wait while stopped, invoke the user callback,
// then write playback data and/or read capture data through the
// pa_simple blocking API, converting formats as configured.
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
8159 void RtApiPulse::callbackEvent( void )
8161 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park on the condition variable while the stream is stopped.
8163 if ( stream_.state == STREAM_STOPPED ) {
8164 MUTEX_LOCK( &stream_.mutex );
8165 while ( !pah->runnable )
8166 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8168 if ( stream_.state != STREAM_RUNNING ) {
8169 MUTEX_UNLOCK( &stream_.mutex );
8172 MUTEX_UNLOCK( &stream_.mutex );
8175 if ( stream_.state == STREAM_CLOSED ) {
8176 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8177 "this shouldn't happen!";
8178 error( RtAudioError::WARNING );
// Invoke the user callback: 1 = stop (drain), 2 = abort immediately.
8182 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8183 double streamTime = getStreamTime();
8184 RtAudioStreamStatus status = 0;
8185 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8186 stream_.bufferSize, streamTime, status,
8187 stream_.callbackInfo.userData );
8189 if ( doStopStream == 2 ) {
8194 MUTEX_LOCK( &stream_.mutex );
// When conversion is enabled, the server-side transfer goes through the
// shared device buffer; otherwise straight to/from the user buffers.
8195 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8196 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8198 if ( stream_.state != STREAM_RUNNING )
8203 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8204 if ( stream_.doConvertBuffer[OUTPUT] ) {
8205 convertBuffer( stream_.deviceBuffer,
8206 stream_.userBuffer[OUTPUT],
8207 stream_.convertInfo[OUTPUT] );
8208 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8209 formatBytes( stream_.deviceFormat[OUTPUT] );
8211 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8212 formatBytes( stream_.userFormat );
// pa_simple_write blocks until the server accepts the whole buffer.
8214 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8215 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8216 pa_strerror( pa_error ) << ".";
8217 errorText_ = errorStream_.str();
8218 error( RtAudioError::WARNING );
8222 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8223 if ( stream_.doConvertBuffer[INPUT] )
8224 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8225 formatBytes( stream_.deviceFormat[INPUT] );
8227 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8228 formatBytes( stream_.userFormat );
// pa_simple_read blocks until a full buffer has been captured.
8230 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8231 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8232 pa_strerror( pa_error ) << ".";
8233 errorText_ = errorStream_.str();
8234 error( RtAudioError::WARNING );
8236 if ( stream_.doConvertBuffer[INPUT] ) {
8237 convertBuffer( stream_.userBuffer[INPUT],
8238 stream_.deviceBuffer,
8239 stream_.convertInfo[INPUT] );
8244 MUTEX_UNLOCK( &stream_.mutex );
8245 RtApi::tickStreamTime();
8247 if ( doStopStream == 1 )
// Start the PulseAudio stream: flip the state to RUNNING and wake the
// parked callback thread via the condition variable. INVALID_USE if the
// stream was never opened; WARNING if it is already running.
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
8251 void RtApiPulse::startStream( void )
8253 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8255 if ( stream_.state == STREAM_CLOSED ) {
8256 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8257 error( RtAudioError::INVALID_USE );
8260 if ( stream_.state == STREAM_RUNNING ) {
8261 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8262 error( RtAudioError::WARNING );
8266 MUTEX_LOCK( &stream_.mutex );
8268 stream_.state = STREAM_RUNNING;
// Wake the callback thread waiting in callbackEvent().
8270 pah->runnable = true;
8271 pthread_cond_signal( &pah->runnable_cv );
8272 MUTEX_UNLOCK( &stream_.mutex );
// Stop the PulseAudio stream, draining any queued playback data so the
// tail of the audio is heard. INVALID_USE if not open; WARNING if
// already stopped.
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
8275 void RtApiPulse::stopStream( void )
8277 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8279 if ( stream_.state == STREAM_CLOSED ) {
8280 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8281 error( RtAudioError::INVALID_USE );
8284 if ( stream_.state == STREAM_STOPPED ) {
8285 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8286 error( RtAudioError::WARNING );
8290 stream_.state = STREAM_STOPPED;
8291 MUTEX_LOCK( &stream_.mutex );
// Drain (play out) any buffered output before declaring the stream stopped.
8293 if ( pah && pah->s_play ) {
8295 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8296 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8297 pa_strerror( pa_error ) << ".";
8298 errorText_ = errorStream_.str();
8299 MUTEX_UNLOCK( &stream_.mutex );
8300 error( RtAudioError::SYSTEM_ERROR );
8305 stream_.state = STREAM_STOPPED;
8306 MUTEX_UNLOCK( &stream_.mutex );
// Abort the PulseAudio stream: like stopStream(), but queued playback
// data is flushed (discarded) instead of drained.
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
8309 void RtApiPulse::abortStream( void )
8311 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8313 if ( stream_.state == STREAM_CLOSED ) {
8314 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8315 error( RtAudioError::INVALID_USE );
8318 if ( stream_.state == STREAM_STOPPED ) {
8319 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8320 error( RtAudioError::WARNING );
8324 stream_.state = STREAM_STOPPED;
8325 MUTEX_LOCK( &stream_.mutex );
// Discard any buffered output immediately.
8327 if ( pah && pah->s_play ) {
8329 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8330 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8331 pa_strerror( pa_error ) << ".";
8332 errorText_ = errorStream_.str();
8333 MUTEX_UNLOCK( &stream_.mutex );
8334 error( RtAudioError::SYSTEM_ERROR );
8339 stream_.state = STREAM_STOPPED;
8340 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the PulseAudio stream:
// validate device/channels/rate/format, configure conversion flags,
// allocate user/device buffers, connect to the server via pa_simple_new,
// and (on first open) spawn the callback thread. Returns false on any
// failure. The interface matches the other RtApi backends.
//
// Fixes in this revision:
//  * line 8523: "¶m" was an HTML-entity corruption of "&param"
//    (&para; decoded to the pilcrow character), which broke the
//    pthread_attr_setschedparam() call — restored to &param.
//  * comment typo "take affect" -> "take effect".
//
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
8343 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8344 unsigned int channels, unsigned int firstChannel,
8345 unsigned int sampleRate, RtAudioFormat format,
8346 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8348 PulseAudioHandle *pah = 0;
8349 unsigned long bufferBytes = 0;
// PulseAudio exposes exactly one device (index 0), mono or stereo only.
8352 if ( device != 0 ) return false;
8353 if ( mode != INPUT && mode != OUTPUT ) return false;
8354 if ( channels != 1 && channels != 2 ) {
8355 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8358 ss.channels = channels;
8360 if ( firstChannel != 0 ) return false;
// The requested rate must be one of the fixed SUPPORTED_SAMPLERATES.
8362 bool sr_found = false;
8363 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8364 if ( sampleRate == *sr ) {
8366 stream_.sampleRate = sampleRate;
8367 ss.rate = sampleRate;
8372 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Use the native PA format when available; otherwise convert via float32.
8377 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8378 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8379 if ( format == sf->rtaudio_format ) {
8381 stream_.userFormat = sf->rtaudio_format;
8382 stream_.deviceFormat[mode] = stream_.userFormat;
8383 ss.format = sf->pa_format;
8387 if ( !sf_found ) { // Use internal data format conversion.
8388 stream_.userFormat = format;
8389 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8390 ss.format = PA_SAMPLE_FLOAT32LE;
8393 // Set other stream parameters.
8394 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8395 else stream_.userInterleaved = true;
8396 stream_.deviceInterleaved[mode] = true;
8397 stream_.nBuffers = 1;
8398 stream_.doByteSwap[mode] = false;
8399 stream_.nUserChannels[mode] = channels;
8400 stream_.nDeviceChannels[mode] = channels + firstChannel;
8401 stream_.channelOffset[mode] = 0;
8402 std::string streamName = "RtAudio";
8404 // Set flags for buffer conversion.
8405 stream_.doConvertBuffer[mode] = false;
8406 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8407 stream_.doConvertBuffer[mode] = true;
8408 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8409 stream_.doConvertBuffer[mode] = true;
8411 // Allocate necessary internal buffers.
8412 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8413 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8414 if ( stream_.userBuffer[mode] == NULL ) {
8415 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8418 stream_.bufferSize = *bufferSize;
8420 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing (output) device buffer for input when it is big enough.
8422 bool makeBuffer = true;
8423 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8424 if ( mode == INPUT ) {
8425 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8426 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8427 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8432 bufferBytes *= *bufferSize;
8433 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8434 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8435 if ( stream_.deviceBuffer == NULL ) {
8436 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8442 stream_.device[mode] = device;
8444 // Setup the buffer conversion information structure.
8445 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// First open: create and register the API handle + condition variable.
8447 if ( !stream_.apiHandle ) {
8448 PulseAudioHandle *pah = new PulseAudioHandle;
8450 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8454 stream_.apiHandle = pah;
8455 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8456 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8460 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8463 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Record stream: cap the server-side fragment size to one buffer.
8466 pa_buffer_attr buffer_attr;
8467 buffer_attr.fragsize = bufferBytes;
8468 buffer_attr.maxlength = -1;
8470 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8471 if ( !pah->s_rec ) {
8472 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8477 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8478 if ( !pah->s_play ) {
8479 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Second direction opened on the same stream promotes the mode to DUPLEX.
8487 if ( stream_.mode == UNINITIALIZED )
8488 stream_.mode = mode;
8489 else if ( stream_.mode == mode )
8492 stream_.mode = DUPLEX;
8494 if ( !stream_.callbackInfo.isRunning ) {
8495 stream_.callbackInfo.object = this;
8497 stream_.state = STREAM_STOPPED;
8498 // Set the thread attributes for joinable and realtime scheduling
8499 // priority (optional). The higher priority will only take effect
8500 // if the program is run as root or suid. Note, under Linux
8501 // processes with CAP_SYS_NICE privilege, a user can change
8502 // scheduling policy and priority (thus need not be root). See
8503 // POSIX "capabilities".
8504 pthread_attr_t attr;
8505 pthread_attr_init( &attr );
8506 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8507 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8508 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8509 stream_.callbackInfo.doRealtime = true;
8510 struct sched_param param;
8511 int priority = options->priority;
8512 int min = sched_get_priority_min( SCHED_RR );
8513 int max = sched_get_priority_max( SCHED_RR );
8514 if ( priority < min ) priority = min;
8515 else if ( priority > max ) priority = max;
8516 param.sched_priority = priority;
8518 // Set the policy BEFORE the priority. Otherwise it fails.
8519 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8520 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8521 // This is definitely required. Otherwise it fails.
8522 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8523 pthread_attr_setschedparam(&attr, &param);
8526 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8528 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8531 stream_.callbackInfo.isRunning = true;
8532 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8533 pthread_attr_destroy(&attr);
// Realtime attributes may be rejected; retry with defaults before failing.
8535 // Failed. Try instead with default attributes.
8536 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8538 stream_.callbackInfo.isRunning = false;
8539 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// Error-unwind path: tear down the handle and all allocated buffers.
8548 if ( pah && stream_.callbackInfo.isRunning ) {
8549 pthread_cond_destroy( &pah->runnable_cv );
8551 stream_.apiHandle = 0;
8554 for ( int i=0; i<2; i++ ) {
8555 if ( stream_.userBuffer[i] ) {
8556 free( stream_.userBuffer[i] );
8557 stream_.userBuffer[i] = 0;
8561 if ( stream_.deviceBuffer ) {
8562 free( stream_.deviceBuffer );
8563 stream_.deviceBuffer = 0;
8566 stream_.state = STREAM_CLOSED;
8573 #if defined(__LINUX_OSS__)
8576 #include <sys/ioctl.h>
8579 #include <sys/soundcard.h>
// Forward declaration of the OSS callback thread entry point.
8583 static void *ossCallbackHandler(void * ptr);
8585 // A structure to hold various information related to the OSS API
// NOTE(review): the struct's opening line and several members are elided
// from this listing; only these fragments are visible.
8588 int id[2]; // device ids
8591 pthread_cond_t runnable;
// Constructor: not triggered, device ids zeroed, no xruns recorded.
8594 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — no backend-wide initialization is required.
8597 RtApiOss :: RtApiOss()
8599 // Nothing to do here.
// Destructor: close the stream if the caller has not already done so.
8602 RtApiOss :: ~RtApiOss()
8604 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count OSS audio devices by querying SNDCTL_SYSINFO through /dev/mixer.
// Emits a WARNING (and, per the visible control flow, presumably returns
// zero — confirm against the full source) if the mixer cannot be opened
// or the ioctl fails (requires OSS >= 4.0).
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
8607 unsigned int RtApiOss :: getDeviceCount( void )
8609 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8610 if ( mixerfd == -1 ) {
8611 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8612 error( RtAudioError::WARNING );
8616 oss_sysinfo sysinfo;
8617 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8619 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8620 error( RtAudioError::WARNING );
8625 return sysinfo.numaudios;
// Probe one OSS device (SNDCTL_AUDIOINFO via /dev/mixer): channel
// capabilities, native sample formats, and supported sample rates.
// info.probed starts false and (per the usual RtApi convention —
// confirm against the full source) is set true on success in lines
// elided from this listing.
// NOTE(review): this listing has elided lines (non-contiguous embedded
// line numbers); comments describe only the code that is visible here.
8628 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8630 RtAudio::DeviceInfo info;
8631 info.probed = false;
8633 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8634 if ( mixerfd == -1 ) {
8635 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8636 error( RtAudioError::WARNING );
8640 oss_sysinfo sysinfo;
8641 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8642 if ( result == -1 ) {
8644 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8645 error( RtAudioError::WARNING );
8649 unsigned nDevices = sysinfo.numaudios;
8650 if ( nDevices == 0 ) {
8652 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8653 error( RtAudioError::INVALID_USE );
8657 if ( device >= nDevices ) {
8659 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8660 error( RtAudioError::INVALID_USE );
8664 oss_audioinfo ainfo;
8666 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8668 if ( result == -1 ) {
8669 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8670 errorText_ = errorStream_.str();
8671 error( RtAudioError::WARNING );
// Channel capabilities from the device caps bits.
8676 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8677 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8678 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8679 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8680 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8683 // Probe data formats ... do for input
8684 unsigned long mask = ainfo.iformats;
8685 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8686 info.nativeFormats |= RTAUDIO_SINT16;
8687 if ( mask & AFMT_S8 )
8688 info.nativeFormats |= RTAUDIO_SINT8;
8689 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8690 info.nativeFormats |= RTAUDIO_SINT32;
8692 if ( mask & AFMT_FLOAT )
8693 info.nativeFormats |= RTAUDIO_FLOAT32;
8695 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8696 info.nativeFormats |= RTAUDIO_SINT24;
8698 // Check that we have at least one supported format
8699 if ( info.nativeFormats == 0 ) {
8700 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8701 errorText_ = errorStream_.str();
8702 error( RtAudioError::WARNING );
8706 // Probe the supported sample rates.
8707 info.sampleRates.clear();
// If the device reports an explicit rate list, intersect it with the
// RtAudio SAMPLE_RATES table; prefer the highest rate <= 48000.
8708 if ( ainfo.nrates ) {
8709 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8710 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8711 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8712 info.sampleRates.push_back( SAMPLE_RATES[k] );
8714 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8715 info.preferredSampleRate = SAMPLE_RATES[k];
// Otherwise accept every table rate inside the device's [min, max] range.
8723 // Check min and max rate values.
8724 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8725 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8726 info.sampleRates.push_back( SAMPLE_RATES[k] );
8728 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8729 info.preferredSampleRate = SAMPLE_RATES[k];
8734 if ( info.sampleRates.size() == 0 ) {
8735 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8736 errorText_ = errorStream_.str();
8737 error( RtAudioError::WARNING );
8741 info.name = ainfo.name;
8748 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8749 unsigned int firstChannel, unsigned int sampleRate,
8750 RtAudioFormat format, unsigned int *bufferSize,
8751 RtAudio::StreamOptions *options )
8753 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8754 if ( mixerfd == -1 ) {
8755 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8759 oss_sysinfo sysinfo;
8760 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8761 if ( result == -1 ) {
8763 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8767 unsigned nDevices = sysinfo.numaudios;
8768 if ( nDevices == 0 ) {
8769 // This should not happen because a check is made before this function is called.
8771 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8775 if ( device >= nDevices ) {
8776 // This should not happen because a check is made before this function is called.
8778 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8782 oss_audioinfo ainfo;
8784 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8786 if ( result == -1 ) {
8787 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8788 errorText_ = errorStream_.str();
8792 // Check if device supports input or output
8793 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8794 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8795 if ( mode == OUTPUT )
8796 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8798 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8799 errorText_ = errorStream_.str();
8804 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8805 if ( mode == OUTPUT )
8807 else { // mode == INPUT
8808 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8809 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8810 close( handle->id[0] );
8812 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8813 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8814 errorText_ = errorStream_.str();
8817 // Check that the number previously set channels is the same.
8818 if ( stream_.nUserChannels[0] != channels ) {
8819 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8820 errorText_ = errorStream_.str();
8829 // Set exclusive access if specified.
8830 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8832 // Try to open the device.
8834 fd = open( ainfo.devnode, flags, 0 );
8836 if ( errno == EBUSY )
8837 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8839 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8840 errorText_ = errorStream_.str();
8844 // For duplex operation, specifically set this mode (this doesn't seem to work).
8846 if ( flags | O_RDWR ) {
8847 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8848 if ( result == -1) {
8849 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8850 errorText_ = errorStream_.str();
8856 // Check the device channel support.
8857 stream_.nUserChannels[mode] = channels;
8858 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8860 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8861 errorText_ = errorStream_.str();
8865 // Set the number of channels.
8866 int deviceChannels = channels + firstChannel;
8867 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8868 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8870 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8871 errorText_ = errorStream_.str();
8874 stream_.nDeviceChannels[mode] = deviceChannels;
8876 // Get the data format mask
8878 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8879 if ( result == -1 ) {
8881 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8882 errorText_ = errorStream_.str();
8886 // Determine how to set the device format.
8887 stream_.userFormat = format;
8888 int deviceFormat = -1;
8889 stream_.doByteSwap[mode] = false;
8890 if ( format == RTAUDIO_SINT8 ) {
8891 if ( mask & AFMT_S8 ) {
8892 deviceFormat = AFMT_S8;
8893 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8896 else if ( format == RTAUDIO_SINT16 ) {
8897 if ( mask & AFMT_S16_NE ) {
8898 deviceFormat = AFMT_S16_NE;
8899 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8901 else if ( mask & AFMT_S16_OE ) {
8902 deviceFormat = AFMT_S16_OE;
8903 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8904 stream_.doByteSwap[mode] = true;
8907 else if ( format == RTAUDIO_SINT24 ) {
8908 if ( mask & AFMT_S24_NE ) {
8909 deviceFormat = AFMT_S24_NE;
8910 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8912 else if ( mask & AFMT_S24_OE ) {
8913 deviceFormat = AFMT_S24_OE;
8914 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8915 stream_.doByteSwap[mode] = true;
8918 else if ( format == RTAUDIO_SINT32 ) {
8919 if ( mask & AFMT_S32_NE ) {
8920 deviceFormat = AFMT_S32_NE;
8921 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8923 else if ( mask & AFMT_S32_OE ) {
8924 deviceFormat = AFMT_S32_OE;
8925 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8926 stream_.doByteSwap[mode] = true;
8930 if ( deviceFormat == -1 ) {
8931 // The user requested format is not natively supported by the device.
8932 if ( mask & AFMT_S16_NE ) {
8933 deviceFormat = AFMT_S16_NE;
8934 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8936 else if ( mask & AFMT_S32_NE ) {
8937 deviceFormat = AFMT_S32_NE;
8938 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8940 else if ( mask & AFMT_S24_NE ) {
8941 deviceFormat = AFMT_S24_NE;
8942 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8944 else if ( mask & AFMT_S16_OE ) {
8945 deviceFormat = AFMT_S16_OE;
8946 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8947 stream_.doByteSwap[mode] = true;
8949 else if ( mask & AFMT_S32_OE ) {
8950 deviceFormat = AFMT_S32_OE;
8951 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8952 stream_.doByteSwap[mode] = true;
8954 else if ( mask & AFMT_S24_OE ) {
8955 deviceFormat = AFMT_S24_OE;
8956 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8957 stream_.doByteSwap[mode] = true;
8959 else if ( mask & AFMT_S8) {
8960 deviceFormat = AFMT_S8;
8961 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8965 if ( stream_.deviceFormat[mode] == 0 ) {
8966 // This really shouldn't happen ...
8968 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
8969 errorText_ = errorStream_.str();
8973 // Set the data format.
8974 int temp = deviceFormat;
8975 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
8976 if ( result == -1 || deviceFormat != temp ) {
8978 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
8979 errorText_ = errorStream_.str();
8983 // Attempt to set the buffer size. According to OSS, the minimum
8984 // number of buffers is two. The supposed minimum buffer size is 16
8985 // bytes, so that will be our lower bound. The argument to this
8986 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
8987 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
8988 // We'll check the actual value used near the end of the setup
8990 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
8991 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
8993 if ( options ) buffers = options->numberOfBuffers;
8994 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
8995 if ( buffers < 2 ) buffers = 3;
8996 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
8997 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
8998 if ( result == -1 ) {
9000 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9001 errorText_ = errorStream_.str();
9004 stream_.nBuffers = buffers;
9006 // Save buffer size (in sample frames).
9007 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9008 stream_.bufferSize = *bufferSize;
9010 // Set the sample rate.
9011 int srate = sampleRate;
9012 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9013 if ( result == -1 ) {
9015 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9016 errorText_ = errorStream_.str();
9020 // Verify the sample rate setup worked.
9021 if ( abs( srate - (int)sampleRate ) > 100 ) {
9023 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9024 errorText_ = errorStream_.str();
9027 stream_.sampleRate = sampleRate;
9029 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9030 // We're doing duplex setup here.
9031 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9032 stream_.nDeviceChannels[0] = deviceChannels;
9035 // Set interleaving parameters.
9036 stream_.userInterleaved = true;
9037 stream_.deviceInterleaved[mode] = true;
9038 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9039 stream_.userInterleaved = false;
9041 // Set flags for buffer conversion
9042 stream_.doConvertBuffer[mode] = false;
9043 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9044 stream_.doConvertBuffer[mode] = true;
9045 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9046 stream_.doConvertBuffer[mode] = true;
9047 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9048 stream_.nUserChannels[mode] > 1 )
9049 stream_.doConvertBuffer[mode] = true;
9051 // Allocate the stream handles if necessary and then save.
9052 if ( stream_.apiHandle == 0 ) {
9054 handle = new OssHandle;
9056 catch ( std::bad_alloc& ) {
9057 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9061 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9062 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9066 stream_.apiHandle = (void *) handle;
9069 handle = (OssHandle *) stream_.apiHandle;
9071 handle->id[mode] = fd;
9073 // Allocate necessary internal buffers.
9074 unsigned long bufferBytes;
9075 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9076 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9077 if ( stream_.userBuffer[mode] == NULL ) {
9078 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9082 if ( stream_.doConvertBuffer[mode] ) {
9084 bool makeBuffer = true;
9085 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9086 if ( mode == INPUT ) {
9087 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9088 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9089 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9094 bufferBytes *= *bufferSize;
9095 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9096 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9097 if ( stream_.deviceBuffer == NULL ) {
9098 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9104 stream_.device[mode] = device;
9105 stream_.state = STREAM_STOPPED;
9107 // Setup the buffer conversion information structure.
9108 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9110 // Setup thread if necessary.
9111 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9112 // We had already set up an output stream.
9113 stream_.mode = DUPLEX;
9114 if ( stream_.device[0] == device ) handle->id[0] = fd;
9117 stream_.mode = mode;
9119 // Setup callback thread.
9120 stream_.callbackInfo.object = (void *) this;
9122 // Set the thread attributes for joinable and realtime scheduling
9123 // priority. The higher priority will only take affect if the
9124 // program is run as root or suid.
9125 pthread_attr_t attr;
9126 pthread_attr_init( &attr );
9127 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9128 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9129 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9130 stream_.callbackInfo.doRealtime = true;
9131 struct sched_param param;
9132 int priority = options->priority;
9133 int min = sched_get_priority_min( SCHED_RR );
9134 int max = sched_get_priority_max( SCHED_RR );
9135 if ( priority < min ) priority = min;
9136 else if ( priority > max ) priority = max;
9137 param.sched_priority = priority;
9139 // Set the policy BEFORE the priority. Otherwise it fails.
9140 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9141 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9142 // This is definitely required. Otherwise it fails.
9143 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9144 pthread_attr_setschedparam(&attr, ¶m);
9147 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9149 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9152 stream_.callbackInfo.isRunning = true;
9153 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9154 pthread_attr_destroy( &attr );
9156 // Failed. Try instead with default attributes.
9157 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9159 stream_.callbackInfo.isRunning = false;
9160 errorText_ = "RtApiOss::error creating callback thread!";
9170 pthread_cond_destroy( &handle->runnable );
9171 if ( handle->id[0] ) close( handle->id[0] );
9172 if ( handle->id[1] ) close( handle->id[1] );
9174 stream_.apiHandle = 0;
9177 for ( int i=0; i<2; i++ ) {
9178 if ( stream_.userBuffer[i] ) {
9179 free( stream_.userBuffer[i] );
9180 stream_.userBuffer[i] = 0;
9184 if ( stream_.deviceBuffer ) {
9185 free( stream_.deviceBuffer );
9186 stream_.deviceBuffer = 0;
9189 stream_.state = STREAM_CLOSED;
9193 void RtApiOss :: closeStream()
9195 if ( stream_.state == STREAM_CLOSED ) {
9196 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9197 error( RtAudioError::WARNING );
9201 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9202 stream_.callbackInfo.isRunning = false;
9203 MUTEX_LOCK( &stream_.mutex );
9204 if ( stream_.state == STREAM_STOPPED )
9205 pthread_cond_signal( &handle->runnable );
9206 MUTEX_UNLOCK( &stream_.mutex );
9207 pthread_join( stream_.callbackInfo.thread, NULL );
9209 if ( stream_.state == STREAM_RUNNING ) {
9210 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9211 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9213 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9214 stream_.state = STREAM_STOPPED;
9218 pthread_cond_destroy( &handle->runnable );
9219 if ( handle->id[0] ) close( handle->id[0] );
9220 if ( handle->id[1] ) close( handle->id[1] );
9222 stream_.apiHandle = 0;
9225 for ( int i=0; i<2; i++ ) {
9226 if ( stream_.userBuffer[i] ) {
9227 free( stream_.userBuffer[i] );
9228 stream_.userBuffer[i] = 0;
9232 if ( stream_.deviceBuffer ) {
9233 free( stream_.deviceBuffer );
9234 stream_.deviceBuffer = 0;
9237 stream_.mode = UNINITIALIZED;
9238 stream_.state = STREAM_CLOSED;
9241 void RtApiOss :: startStream()
9244 if ( stream_.state == STREAM_RUNNING ) {
9245 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9246 error( RtAudioError::WARNING );
9250 MUTEX_LOCK( &stream_.mutex );
9252 stream_.state = STREAM_RUNNING;
9254 // No need to do anything else here ... OSS automatically starts
9255 // when fed samples.
9257 MUTEX_UNLOCK( &stream_.mutex );
9259 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9260 pthread_cond_signal( &handle->runnable );
9263 void RtApiOss :: stopStream()
9266 if ( stream_.state == STREAM_STOPPED ) {
9267 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9268 error( RtAudioError::WARNING );
9272 MUTEX_LOCK( &stream_.mutex );
9274 // The state might change while waiting on a mutex.
9275 if ( stream_.state == STREAM_STOPPED ) {
9276 MUTEX_UNLOCK( &stream_.mutex );
9281 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9282 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9284 // Flush the output with zeros a few times.
9287 RtAudioFormat format;
9289 if ( stream_.doConvertBuffer[0] ) {
9290 buffer = stream_.deviceBuffer;
9291 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9292 format = stream_.deviceFormat[0];
9295 buffer = stream_.userBuffer[0];
9296 samples = stream_.bufferSize * stream_.nUserChannels[0];
9297 format = stream_.userFormat;
9300 memset( buffer, 0, samples * formatBytes(format) );
9301 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9302 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9303 if ( result == -1 ) {
9304 errorText_ = "RtApiOss::stopStream: audio write error.";
9305 error( RtAudioError::WARNING );
9309 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9310 if ( result == -1 ) {
9311 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9312 errorText_ = errorStream_.str();
9315 handle->triggered = false;
9318 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9319 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9320 if ( result == -1 ) {
9321 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9322 errorText_ = errorStream_.str();
9328 stream_.state = STREAM_STOPPED;
9329 MUTEX_UNLOCK( &stream_.mutex );
9331 if ( result != -1 ) return;
9332 error( RtAudioError::SYSTEM_ERROR );
9335 void RtApiOss :: abortStream()
9338 if ( stream_.state == STREAM_STOPPED ) {
9339 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9340 error( RtAudioError::WARNING );
9344 MUTEX_LOCK( &stream_.mutex );
9346 // The state might change while waiting on a mutex.
9347 if ( stream_.state == STREAM_STOPPED ) {
9348 MUTEX_UNLOCK( &stream_.mutex );
9353 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9354 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9355 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9356 if ( result == -1 ) {
9357 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9358 errorText_ = errorStream_.str();
9361 handle->triggered = false;
9364 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9365 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9366 if ( result == -1 ) {
9367 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9368 errorText_ = errorStream_.str();
9374 stream_.state = STREAM_STOPPED;
9375 MUTEX_UNLOCK( &stream_.mutex );
9377 if ( result != -1 ) return;
9378 error( RtAudioError::SYSTEM_ERROR );
9381 void RtApiOss :: callbackEvent()
9383 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9384 if ( stream_.state == STREAM_STOPPED ) {
9385 MUTEX_LOCK( &stream_.mutex );
9386 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9387 if ( stream_.state != STREAM_RUNNING ) {
9388 MUTEX_UNLOCK( &stream_.mutex );
9391 MUTEX_UNLOCK( &stream_.mutex );
9394 if ( stream_.state == STREAM_CLOSED ) {
9395 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9396 error( RtAudioError::WARNING );
9400 // Invoke user callback to get fresh output data.
9401 int doStopStream = 0;
9402 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9403 double streamTime = getStreamTime();
9404 RtAudioStreamStatus status = 0;
9405 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9406 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9407 handle->xrun[0] = false;
9409 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9410 status |= RTAUDIO_INPUT_OVERFLOW;
9411 handle->xrun[1] = false;
9413 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9414 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9415 if ( doStopStream == 2 ) {
9416 this->abortStream();
9420 MUTEX_LOCK( &stream_.mutex );
9422 // The state might change while waiting on a mutex.
9423 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9428 RtAudioFormat format;
9430 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9432 // Setup parameters and do buffer conversion if necessary.
9433 if ( stream_.doConvertBuffer[0] ) {
9434 buffer = stream_.deviceBuffer;
9435 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9436 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9437 format = stream_.deviceFormat[0];
9440 buffer = stream_.userBuffer[0];
9441 samples = stream_.bufferSize * stream_.nUserChannels[0];
9442 format = stream_.userFormat;
9445 // Do byte swapping if necessary.
9446 if ( stream_.doByteSwap[0] )
9447 byteSwapBuffer( buffer, samples, format );
9449 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9451 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9452 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9453 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9454 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9455 handle->triggered = true;
9458 // Write samples to device.
9459 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9461 if ( result == -1 ) {
9462 // We'll assume this is an underrun, though there isn't a
9463 // specific means for determining that.
9464 handle->xrun[0] = true;
9465 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9466 error( RtAudioError::WARNING );
9467 // Continue on to input section.
9471 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9473 // Setup parameters.
9474 if ( stream_.doConvertBuffer[1] ) {
9475 buffer = stream_.deviceBuffer;
9476 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9477 format = stream_.deviceFormat[1];
9480 buffer = stream_.userBuffer[1];
9481 samples = stream_.bufferSize * stream_.nUserChannels[1];
9482 format = stream_.userFormat;
9485 // Read samples from device.
9486 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9488 if ( result == -1 ) {
9489 // We'll assume this is an overrun, though there isn't a
9490 // specific means for determining that.
9491 handle->xrun[1] = true;
9492 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9493 error( RtAudioError::WARNING );
9497 // Do byte swapping if necessary.
9498 if ( stream_.doByteSwap[1] )
9499 byteSwapBuffer( buffer, samples, format );
9501 // Do buffer conversion if necessary.
9502 if ( stream_.doConvertBuffer[1] )
9503 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9507 MUTEX_UNLOCK( &stream_.mutex );
9509 RtApi::tickStreamTime();
9510 if ( doStopStream == 1 ) this->stopStream();
9513 static void *ossCallbackHandler( void *ptr )
9515 CallbackInfo *info = (CallbackInfo *) ptr;
9516 RtApiOss *object = (RtApiOss *) info->object;
9517 bool *isRunning = &info->isRunning;
9519 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9520 if (info->doRealtime) {
9521 std::cerr << "RtAudio oss: " <<
9522 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9523 "running realtime scheduling" << std::endl;
9527 while ( *isRunning == true ) {
9528 pthread_testcancel();
9529 object->callbackEvent();
9532 pthread_exit( NULL );
9535 //******************** End of __LINUX_OSS__ *********************//
9539 // *************************************************** //
9541 // Protected common (OS-independent) RtAudio methods.
9543 // *************************************************** //
9545 // This method can be modified to control the behavior of error
9546 // message printing.
9547 void RtApi :: error( RtAudioError::Type type )
9549 errorStream_.str(""); // clear the ostringstream
9551 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9552 if ( errorCallback ) {
9553 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9555 if ( firstErrorOccurred_ )
9558 firstErrorOccurred_ = true;
9559 const std::string errorMessage = errorText_;
9561 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9562 stream_.callbackInfo.isRunning = false; // exit from the thread
9566 errorCallback( type, errorMessage );
9567 firstErrorOccurred_ = false;
9571 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9572 std::cerr << '\n' << errorText_ << "\n\n";
9573 else if ( type != RtAudioError::WARNING )
9574 throw( RtAudioError( errorText_, type ) );
9577 void RtApi :: verifyStream()
9579 if ( stream_.state == STREAM_CLOSED ) {
9580 errorText_ = "RtApi:: a stream is not open!";
9581 error( RtAudioError::INVALID_USE );
9585 void RtApi :: clearStreamInfo()
9587 stream_.mode = UNINITIALIZED;
9588 stream_.state = STREAM_CLOSED;
9589 stream_.sampleRate = 0;
9590 stream_.bufferSize = 0;
9591 stream_.nBuffers = 0;
9592 stream_.userFormat = 0;
9593 stream_.userInterleaved = true;
9594 stream_.streamTime = 0.0;
9595 stream_.apiHandle = 0;
9596 stream_.deviceBuffer = 0;
9597 stream_.callbackInfo.callback = 0;
9598 stream_.callbackInfo.userData = 0;
9599 stream_.callbackInfo.isRunning = false;
9600 stream_.callbackInfo.errorCallback = 0;
9601 for ( int i=0; i<2; i++ ) {
9602 stream_.device[i] = 11111;
9603 stream_.doConvertBuffer[i] = false;
9604 stream_.deviceInterleaved[i] = true;
9605 stream_.doByteSwap[i] = false;
9606 stream_.nUserChannels[i] = 0;
9607 stream_.nDeviceChannels[i] = 0;
9608 stream_.channelOffset[i] = 0;
9609 stream_.deviceFormat[i] = 0;
9610 stream_.latency[i] = 0;
9611 stream_.userBuffer[i] = 0;
9612 stream_.convertInfo[i].channels = 0;
9613 stream_.convertInfo[i].inJump = 0;
9614 stream_.convertInfo[i].outJump = 0;
9615 stream_.convertInfo[i].inFormat = 0;
9616 stream_.convertInfo[i].outFormat = 0;
9617 stream_.convertInfo[i].inOffset.clear();
9618 stream_.convertInfo[i].outOffset.clear();
9622 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9624 if ( format == RTAUDIO_SINT16 )
9626 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9628 else if ( format == RTAUDIO_FLOAT64 )
9630 else if ( format == RTAUDIO_SINT24 )
9632 else if ( format == RTAUDIO_SINT8 )
9635 errorText_ = "RtApi::formatBytes: undefined format.";
9636 error( RtAudioError::WARNING );
9641 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9643 if ( mode == INPUT ) { // convert device to user buffer
9644 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9645 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9646 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9647 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9649 else { // convert user to device buffer
9650 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9651 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9652 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9653 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9656 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9657 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9659 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9661 // Set up the interleave/deinterleave offsets.
9662 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9663 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9664 ( mode == INPUT && stream_.userInterleaved ) ) {
9665 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9666 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9667 stream_.convertInfo[mode].outOffset.push_back( k );
9668 stream_.convertInfo[mode].inJump = 1;
9672 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9673 stream_.convertInfo[mode].inOffset.push_back( k );
9674 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9675 stream_.convertInfo[mode].outJump = 1;
9679 else { // no (de)interleaving
9680 if ( stream_.userInterleaved ) {
9681 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9682 stream_.convertInfo[mode].inOffset.push_back( k );
9683 stream_.convertInfo[mode].outOffset.push_back( k );
9687 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9688 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9689 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9690 stream_.convertInfo[mode].inJump = 1;
9691 stream_.convertInfo[mode].outJump = 1;
9696 // Add channel offset.
9697 if ( firstChannel > 0 ) {
9698 if ( stream_.deviceInterleaved[mode] ) {
9699 if ( mode == OUTPUT ) {
9700 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9701 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9704 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9705 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9709 if ( mode == OUTPUT ) {
9710 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9711 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9714 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9715 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9721 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9723 // This function does format conversion, input/output channel compensation, and
9724 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9725 // the lower three bytes of a 32-bit integer.
9727 // Clear our device buffer when in/out duplex device channels are different
9728 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9729 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9730 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9733 if (info.outFormat == RTAUDIO_FLOAT64) {
9735 Float64 *out = (Float64 *)outBuffer;
9737 if (info.inFormat == RTAUDIO_SINT8) {
9738 signed char *in = (signed char *)inBuffer;
9739 scale = 1.0 / 127.5;
9740 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9741 for (j=0; j<info.channels; j++) {
9742 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9743 out[info.outOffset[j]] += 0.5;
9744 out[info.outOffset[j]] *= scale;
9747 out += info.outJump;
9750 else if (info.inFormat == RTAUDIO_SINT16) {
9751 Int16 *in = (Int16 *)inBuffer;
9752 scale = 1.0 / 32767.5;
9753 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9754 for (j=0; j<info.channels; j++) {
9755 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9756 out[info.outOffset[j]] += 0.5;
9757 out[info.outOffset[j]] *= scale;
9760 out += info.outJump;
9763 else if (info.inFormat == RTAUDIO_SINT24) {
9764 Int24 *in = (Int24 *)inBuffer;
9765 scale = 1.0 / 8388607.5;
9766 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9767 for (j=0; j<info.channels; j++) {
9768 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9769 out[info.outOffset[j]] += 0.5;
9770 out[info.outOffset[j]] *= scale;
9773 out += info.outJump;
9776 else if (info.inFormat == RTAUDIO_SINT32) {
9777 Int32 *in = (Int32 *)inBuffer;
9778 scale = 1.0 / 2147483647.5;
9779 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9780 for (j=0; j<info.channels; j++) {
9781 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9782 out[info.outOffset[j]] += 0.5;
9783 out[info.outOffset[j]] *= scale;
9786 out += info.outJump;
9789 else if (info.inFormat == RTAUDIO_FLOAT32) {
9790 Float32 *in = (Float32 *)inBuffer;
9791 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9792 for (j=0; j<info.channels; j++) {
9793 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9796 out += info.outJump;
9799 else if (info.inFormat == RTAUDIO_FLOAT64) {
9800 // Channel compensation and/or (de)interleaving only.
9801 Float64 *in = (Float64 *)inBuffer;
9802 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9803 for (j=0; j<info.channels; j++) {
9804 out[info.outOffset[j]] = in[info.inOffset[j]];
9807 out += info.outJump;
9811 else if (info.outFormat == RTAUDIO_FLOAT32) {
9813 Float32 *out = (Float32 *)outBuffer;
9815 if (info.inFormat == RTAUDIO_SINT8) {
9816 signed char *in = (signed char *)inBuffer;
9817 scale = (Float32) ( 1.0 / 127.5 );
9818 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9819 for (j=0; j<info.channels; j++) {
9820 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9821 out[info.outOffset[j]] += 0.5;
9822 out[info.outOffset[j]] *= scale;
9825 out += info.outJump;
9828 else if (info.inFormat == RTAUDIO_SINT16) {
9829 Int16 *in = (Int16 *)inBuffer;
9830 scale = (Float32) ( 1.0 / 32767.5 );
9831 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9832 for (j=0; j<info.channels; j++) {
9833 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9834 out[info.outOffset[j]] += 0.5;
9835 out[info.outOffset[j]] *= scale;
9838 out += info.outJump;
9841 else if (info.inFormat == RTAUDIO_SINT24) {
9842 Int24 *in = (Int24 *)inBuffer;
9843 scale = (Float32) ( 1.0 / 8388607.5 );
9844 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9845 for (j=0; j<info.channels; j++) {
9846 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9847 out[info.outOffset[j]] += 0.5;
9848 out[info.outOffset[j]] *= scale;
9851 out += info.outJump;
9854 else if (info.inFormat == RTAUDIO_SINT32) {
9855 Int32 *in = (Int32 *)inBuffer;
9856 scale = (Float32) ( 1.0 / 2147483647.5 );
9857 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9858 for (j=0; j<info.channels; j++) {
9859 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9860 out[info.outOffset[j]] += 0.5;
9861 out[info.outOffset[j]] *= scale;
9864 out += info.outJump;
9867 else if (info.inFormat == RTAUDIO_FLOAT32) {
9868 // Channel compensation and/or (de)interleaving only.
9869 Float32 *in = (Float32 *)inBuffer;
9870 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9871 for (j=0; j<info.channels; j++) {
9872 out[info.outOffset[j]] = in[info.inOffset[j]];
9875 out += info.outJump;
9878 else if (info.inFormat == RTAUDIO_FLOAT64) {
9879 Float64 *in = (Float64 *)inBuffer;
9880 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9881 for (j=0; j<info.channels; j++) {
9882 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9885 out += info.outJump;
9889 else if (info.outFormat == RTAUDIO_SINT32) {
9890 Int32 *out = (Int32 *)outBuffer;
9891 if (info.inFormat == RTAUDIO_SINT8) {
9892 signed char *in = (signed char *)inBuffer;
9893 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9894 for (j=0; j<info.channels; j++) {
9895 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9896 out[info.outOffset[j]] <<= 24;
9899 out += info.outJump;
9902 else if (info.inFormat == RTAUDIO_SINT16) {
9903 Int16 *in = (Int16 *)inBuffer;
9904 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9905 for (j=0; j<info.channels; j++) {
9906 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9907 out[info.outOffset[j]] <<= 16;
9910 out += info.outJump;
9913 else if (info.inFormat == RTAUDIO_SINT24) {
9914 Int24 *in = (Int24 *)inBuffer;
9915 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9916 for (j=0; j<info.channels; j++) {
9917 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9918 out[info.outOffset[j]] <<= 8;
9921 out += info.outJump;
9924 else if (info.inFormat == RTAUDIO_SINT32) {
9925 // Channel compensation and/or (de)interleaving only.
9926 Int32 *in = (Int32 *)inBuffer;
9927 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9928 for (j=0; j<info.channels; j++) {
9929 out[info.outOffset[j]] = in[info.inOffset[j]];
9932 out += info.outJump;
9935 else if (info.inFormat == RTAUDIO_FLOAT32) {
9936 Float32 *in = (Float32 *)inBuffer;
9937 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9938 for (j=0; j<info.channels; j++) {
9939 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9942 out += info.outJump;
9945 else if (info.inFormat == RTAUDIO_FLOAT64) {
9946 Float64 *in = (Float64 *)inBuffer;
9947 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9948 for (j=0; j<info.channels; j++) {
9949 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9952 out += info.outJump;
9956 else if (info.outFormat == RTAUDIO_SINT24) {
9957 Int24 *out = (Int24 *)outBuffer;
9958 if (info.inFormat == RTAUDIO_SINT8) {
9959 signed char *in = (signed char *)inBuffer;
9960 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9961 for (j=0; j<info.channels; j++) {
9962 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
9963 //out[info.outOffset[j]] <<= 16;
9966 out += info.outJump;
9969 else if (info.inFormat == RTAUDIO_SINT16) {
9970 Int16 *in = (Int16 *)inBuffer;
9971 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9972 for (j=0; j<info.channels; j++) {
9973 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
9974 //out[info.outOffset[j]] <<= 8;
9977 out += info.outJump;
9980 else if (info.inFormat == RTAUDIO_SINT24) {
9981 // Channel compensation and/or (de)interleaving only.
9982 Int24 *in = (Int24 *)inBuffer;
9983 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9984 for (j=0; j<info.channels; j++) {
9985 out[info.outOffset[j]] = in[info.inOffset[j]];
9988 out += info.outJump;
9991 else if (info.inFormat == RTAUDIO_SINT32) {
9992 Int32 *in = (Int32 *)inBuffer;
9993 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9994 for (j=0; j<info.channels; j++) {
9995 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
9996 //out[info.outOffset[j]] >>= 8;
9999 out += info.outJump;
10002 else if (info.inFormat == RTAUDIO_FLOAT32) {
10003 Float32 *in = (Float32 *)inBuffer;
10004 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10005 for (j=0; j<info.channels; j++) {
10006 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10009 out += info.outJump;
10012 else if (info.inFormat == RTAUDIO_FLOAT64) {
10013 Float64 *in = (Float64 *)inBuffer;
10014 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10015 for (j=0; j<info.channels; j++) {
10016 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10019 out += info.outJump;
10023 else if (info.outFormat == RTAUDIO_SINT16) {
10024 Int16 *out = (Int16 *)outBuffer;
10025 if (info.inFormat == RTAUDIO_SINT8) {
10026 signed char *in = (signed char *)inBuffer;
10027 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10028 for (j=0; j<info.channels; j++) {
10029 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10030 out[info.outOffset[j]] <<= 8;
10033 out += info.outJump;
10036 else if (info.inFormat == RTAUDIO_SINT16) {
10037 // Channel compensation and/or (de)interleaving only.
10038 Int16 *in = (Int16 *)inBuffer;
10039 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10040 for (j=0; j<info.channels; j++) {
10041 out[info.outOffset[j]] = in[info.inOffset[j]];
10044 out += info.outJump;
10047 else if (info.inFormat == RTAUDIO_SINT24) {
10048 Int24 *in = (Int24 *)inBuffer;
10049 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10050 for (j=0; j<info.channels; j++) {
10051 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10054 out += info.outJump;
10057 else if (info.inFormat == RTAUDIO_SINT32) {
10058 Int32 *in = (Int32 *)inBuffer;
10059 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10060 for (j=0; j<info.channels; j++) {
10061 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10064 out += info.outJump;
10067 else if (info.inFormat == RTAUDIO_FLOAT32) {
10068 Float32 *in = (Float32 *)inBuffer;
10069 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10070 for (j=0; j<info.channels; j++) {
10071 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10074 out += info.outJump;
10077 else if (info.inFormat == RTAUDIO_FLOAT64) {
10078 Float64 *in = (Float64 *)inBuffer;
10079 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10080 for (j=0; j<info.channels; j++) {
10081 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10084 out += info.outJump;
10088 else if (info.outFormat == RTAUDIO_SINT8) {
10089 signed char *out = (signed char *)outBuffer;
10090 if (info.inFormat == RTAUDIO_SINT8) {
10091 // Channel compensation and/or (de)interleaving only.
10092 signed char *in = (signed char *)inBuffer;
10093 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10094 for (j=0; j<info.channels; j++) {
10095 out[info.outOffset[j]] = in[info.inOffset[j]];
10098 out += info.outJump;
10101 if (info.inFormat == RTAUDIO_SINT16) {
10102 Int16 *in = (Int16 *)inBuffer;
10103 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10104 for (j=0; j<info.channels; j++) {
10105 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10108 out += info.outJump;
10111 else if (info.inFormat == RTAUDIO_SINT24) {
10112 Int24 *in = (Int24 *)inBuffer;
10113 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10114 for (j=0; j<info.channels; j++) {
10115 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10118 out += info.outJump;
10121 else if (info.inFormat == RTAUDIO_SINT32) {
10122 Int32 *in = (Int32 *)inBuffer;
10123 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10124 for (j=0; j<info.channels; j++) {
10125 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10128 out += info.outJump;
10131 else if (info.inFormat == RTAUDIO_FLOAT32) {
10132 Float32 *in = (Float32 *)inBuffer;
10133 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10134 for (j=0; j<info.channels; j++) {
10135 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10138 out += info.outJump;
10141 else if (info.inFormat == RTAUDIO_FLOAT64) {
10142 Float64 *in = (Float64 *)inBuffer;
10143 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10144 for (j=0; j<info.channels; j++) {
10145 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10148 out += info.outJump;
10154 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10155 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10156 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10158 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10164 if ( format == RTAUDIO_SINT16 ) {
10165 for ( unsigned int i=0; i<samples; i++ ) {
10166 // Swap 1st and 2nd bytes.
10171 // Increment 2 bytes.
10175 else if ( format == RTAUDIO_SINT32 ||
10176 format == RTAUDIO_FLOAT32 ) {
10177 for ( unsigned int i=0; i<samples; i++ ) {
10178 // Swap 1st and 4th bytes.
10183 // Swap 2nd and 3rd bytes.
10189 // Increment 3 more bytes.
10193 else if ( format == RTAUDIO_SINT24 ) {
10194 for ( unsigned int i=0; i<samples; i++ ) {
10195 // Swap 1st and 3rd bytes.
10200 // Increment 2 more bytes.
10204 else if ( format == RTAUDIO_FLOAT64 ) {
10205 for ( unsigned int i=0; i<samples; i++ ) {
10206 // Swap 1st and 8th bytes
10211 // Swap 2nd and 7th bytes
10217 // Swap 3rd and 6th bytes
10223 // Swap 4th and 5th bytes
10229 // Increment 5 more bytes.
10235 // Indentation settings for Vim and Emacs
10237 // Local Variables:
10238 // c-basic-offset: 2
10239 // indent-tabs-mode: nil
10242 // vim: et sts=2 sw=2