1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-neutral mutex wrappers: on Windows builds (DirectSound, ASIO or
// WASAPI) the generic MUTEX_* macros map onto the Win32 critical-section API.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
66 static std::string convertCharPointerToStdString(const char *text)
68 return std::string(text);
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
79 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
81 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
82 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
83 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
84 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
86 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
87 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_PULSE__)
111 apis.push_back( LINUX_PULSE );
113 #if defined(__LINUX_ALSA__)
114 apis.push_back( LINUX_ALSA );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
136 void RtAudio :: openRtApi( RtAudio::Api api )
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
180 RtAudio :: RtAudio( RtAudio::Api api )
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
187 if ( rtapi_ ) return;
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
203 if ( rtapi_ ) return;
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll thow an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
213 RtAudio :: ~RtAudio()
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
253 MUTEX_DESTROY( &stream_.mutex );
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
257 RtAudio::StreamParameters *iParams,
258 RtAudioFormat format, unsigned int sampleRate,
259 unsigned int *bufferFrames,
260 RtAudioCallback callback, void *userData,
261 RtAudio::StreamOptions *options,
262 RtAudioErrorCallback errorCallback )
264 if ( stream_.state != STREAM_CLOSED ) {
265 errorText_ = "RtApi::openStream: a stream is already open!";
266 error( RtAudioError::INVALID_USE );
270 // Clear stream information potentially left from a previously open stream.
273 if ( oParams && oParams->nChannels < 1 ) {
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
275 error( RtAudioError::INVALID_USE );
279 if ( iParams && iParams->nChannels < 1 ) {
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
281 error( RtAudioError::INVALID_USE );
285 if ( oParams == NULL && iParams == NULL ) {
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
287 error( RtAudioError::INVALID_USE );
291 if ( formatBytes(format) == 0 ) {
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
293 error( RtAudioError::INVALID_USE );
297 unsigned int nDevices = getDeviceCount();
298 unsigned int oChannels = 0;
300 oChannels = oParams->nChannels;
301 if ( oParams->deviceId >= nDevices ) {
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
303 error( RtAudioError::INVALID_USE );
308 unsigned int iChannels = 0;
310 iChannels = iParams->nChannels;
311 if ( iParams->deviceId >= nDevices ) {
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
313 error( RtAudioError::INVALID_USE );
320 if ( oChannels > 0 ) {
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
323 sampleRate, format, bufferFrames, options );
324 if ( result == false ) {
325 error( RtAudioError::SYSTEM_ERROR );
330 if ( iChannels > 0 ) {
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
333 sampleRate, format, bufferFrames, options );
334 if ( result == false ) {
335 if ( oChannels > 0 ) closeStream();
336 error( RtAudioError::SYSTEM_ERROR );
341 stream_.callbackInfo.callback = (void *) callback;
342 stream_.callbackInfo.userData = userData;
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
346 stream_.state = STREAM_STOPPED;
349 unsigned int RtApi :: getDefaultInputDevice( void )
351 // Should be implemented in subclasses if possible.
355 unsigned int RtApi :: getDefaultOutputDevice( void )
357 // Should be implemented in subclasses if possible.
361 void RtApi :: closeStream( void )
363 // MUST be implemented in subclasses!
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
370 RtAudio::StreamOptions * /*options*/ )
372 // MUST be implemented in subclasses!
376 void RtApi :: tickStreamTime( void )
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
389 long RtApi :: getStreamLatency( void )
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
402 double RtApi :: getStreamTime( void )
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
413 return stream_.streamTime;
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
421 return stream_.streamTime;
425 void RtApi :: setStreamTime( double time )
430 stream_.streamTime = time;
431 #if defined( HAVE_GETTIMEOFDAY )
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
436 unsigned int RtApi :: getStreamSampleRate( void )
440 return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
467 // A structure to hold various information related to the CoreAudio API
470 AudioDeviceID id[2]; // device ids
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
472 AudioDeviceIOProcID procId[2];
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
475 UInt32 nStreams[2]; // number of streams to use
478 pthread_cond_t condition;
479 int drainCounter; // Tracks callback counts when draining
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
486 RtApiCore:: RtApiCore()
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
489 // This is a largely undocumented but absolutely necessary
490 // requirement starting with OS-X 10.6. If not called, queries and
491 // updates to various audio device properties are not handled
493 CFRunLoopRef theRunLoop = NULL;
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
495 kAudioObjectPropertyScopeGlobal,
496 kAudioObjectPropertyElementMaster };
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
498 if ( result != noErr ) {
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
500 error( RtAudioError::WARNING );
505 RtApiCore :: ~RtApiCore()
507 // The subclass destructor gets called before the base class
508 // destructor, so close an existing stream before deallocating
509 // apiDeviceId memory.
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
513 unsigned int RtApiCore :: getDeviceCount( void )
515 // Find out how many audio devices there are, if any.
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
519 if ( result != noErr ) {
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
521 error( RtAudioError::WARNING );
525 return dataSize / sizeof( AudioDeviceID );
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
530 unsigned int nDevices = getDeviceCount();
531 if ( nDevices <= 1 ) return 0;
534 UInt32 dataSize = sizeof( AudioDeviceID );
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
537 if ( result != noErr ) {
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
539 error( RtAudioError::WARNING );
543 dataSize *= nDevices;
544 AudioDeviceID deviceList[ nDevices ];
545 property.mSelector = kAudioHardwarePropertyDevices;
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
547 if ( result != noErr ) {
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
549 error( RtAudioError::WARNING );
553 for ( unsigned int i=0; i<nDevices; i++ )
554 if ( id == deviceList[i] ) return i;
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
557 error( RtAudioError::WARNING );
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
563 unsigned int nDevices = getDeviceCount();
564 if ( nDevices <= 1 ) return 0;
567 UInt32 dataSize = sizeof( AudioDeviceID );
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
570 if ( result != noErr ) {
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
572 error( RtAudioError::WARNING );
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
577 AudioDeviceID deviceList[ nDevices ];
578 property.mSelector = kAudioHardwarePropertyDevices;
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
580 if ( result != noErr ) {
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
582 error( RtAudioError::WARNING );
586 for ( unsigned int i=0; i<nDevices; i++ )
587 if ( id == deviceList[i] ) return i;
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
590 error( RtAudioError::WARNING );
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
596 RtAudio::DeviceInfo info;
600 unsigned int nDevices = getDeviceCount();
601 if ( nDevices == 0 ) {
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
603 error( RtAudioError::INVALID_USE );
607 if ( device >= nDevices ) {
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
609 error( RtAudioError::INVALID_USE );
613 AudioDeviceID deviceList[ nDevices ];
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
616 kAudioObjectPropertyScopeGlobal,
617 kAudioObjectPropertyElementMaster };
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
619 0, NULL, &dataSize, (void *) &deviceList );
620 if ( result != noErr ) {
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
622 error( RtAudioError::WARNING );
626 AudioDeviceID id = deviceList[ device ];
628 // Get the device name.
631 dataSize = sizeof( CFStringRef );
632 property.mSelector = kAudioObjectPropertyManufacturer;
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
634 if ( result != noErr ) {
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
636 errorText_ = errorStream_.str();
637 error( RtAudioError::WARNING );
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
642 int length = CFStringGetLength(cfname);
643 char *mname = (char *)malloc(length * 3 + 1);
644 #if defined( UNICODE ) || defined( _UNICODE )
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
649 info.name.append( (const char *)mname, strlen(mname) );
650 info.name.append( ": " );
654 property.mSelector = kAudioObjectPropertyName;
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
656 if ( result != noErr ) {
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
658 errorText_ = errorStream_.str();
659 error( RtAudioError::WARNING );
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
664 length = CFStringGetLength(cfname);
665 char *name = (char *)malloc(length * 3 + 1);
666 #if defined( UNICODE ) || defined( _UNICODE )
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
671 info.name.append( (const char *)name, strlen(name) );
675 // Get the output stream "configuration".
676 AudioBufferList *bufferList = nil;
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
678 property.mScope = kAudioDevicePropertyScopeOutput;
679 // property.mElement = kAudioObjectPropertyElementWildcard;
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
682 if ( result != noErr || dataSize == 0 ) {
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
684 errorText_ = errorStream_.str();
685 error( RtAudioError::WARNING );
689 // Allocate the AudioBufferList.
690 bufferList = (AudioBufferList *) malloc( dataSize );
691 if ( bufferList == NULL ) {
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
693 error( RtAudioError::WARNING );
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
698 if ( result != noErr || dataSize == 0 ) {
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
701 errorText_ = errorStream_.str();
702 error( RtAudioError::WARNING );
706 // Get output channel information.
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
708 for ( i=0; i<nStreams; i++ )
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
712 // Get the input stream "configuration".
713 property.mScope = kAudioDevicePropertyScopeInput;
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
715 if ( result != noErr || dataSize == 0 ) {
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
717 errorText_ = errorStream_.str();
718 error( RtAudioError::WARNING );
722 // Allocate the AudioBufferList.
723 bufferList = (AudioBufferList *) malloc( dataSize );
724 if ( bufferList == NULL ) {
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
726 error( RtAudioError::WARNING );
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
731 if (result != noErr || dataSize == 0) {
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
734 errorText_ = errorStream_.str();
735 error( RtAudioError::WARNING );
739 // Get input channel information.
740 nStreams = bufferList->mNumberBuffers;
741 for ( i=0; i<nStreams; i++ )
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
745 // If device opens for both playback and capture, we determine the channels.
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
749 // Probe the device sample rates.
750 bool isInput = false;
751 if ( info.outputChannels == 0 ) isInput = true;
753 // Determine the supported sample rates.
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
765 AudioValueRange rangeList[ nRanges ];
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
767 if ( result != kAudioHardwareNoError ) {
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
769 errorText_ = errorStream_.str();
770 error( RtAudioError::WARNING );
774 // The sample rate reporting mechanism is a bit of a mystery. It
775 // seems that it can either return individual rates or a range of
776 // rates. I assume that if the min / max range values are the same,
777 // then that represents a single supported rate and if the min / max
778 // range values are different, the device supports an arbitrary
779 // range of values (though there might be multiple ranges, so we'll
780 // use the most conservative range).
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
782 bool haveValueRange = false;
783 info.sampleRates.clear();
784 for ( UInt32 i=0; i<nRanges; i++ ) {
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
787 info.sampleRates.push_back( tmpSr );
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
790 info.preferredSampleRate = tmpSr;
793 haveValueRange = true;
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
799 if ( haveValueRange ) {
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
805 info.preferredSampleRate = SAMPLE_RATES[k];
810 // Sort and remove any redundant values
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
814 if ( info.sampleRates.size() == 0 ) {
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
822 // Thus, any other "physical" formats supported by the device are of
823 // no interest to the client.
824 info.nativeFormats = RTAUDIO_FLOAT32;
826 if ( info.outputChannels > 0 )
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
828 if ( info.inputChannels > 0 )
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
836 const AudioTimeStamp* /*inNow*/,
837 const AudioBufferList* inInputData,
838 const AudioTimeStamp* /*inInputTime*/,
839 AudioBufferList* outOutputData,
840 const AudioTimeStamp* /*inOutputTime*/,
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
845 RtApiCore *object = (RtApiCore *) info->object;
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
847 return kAudioHardwareUnspecifiedError;
849 return kAudioHardwareNoError;
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
854 const AudioObjectPropertyAddress properties[],
855 void* handlePointer )
857 CoreHandle *handle = (CoreHandle *) handlePointer;
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
861 handle->xrun[1] = true;
863 handle->xrun[0] = true;
867 return kAudioHardwareNoError;
870 static OSStatus rateListener( AudioObjectID inDevice,
871 UInt32 /*nAddresses*/,
872 const AudioObjectPropertyAddress /*properties*/[],
875 Float64 *rate = (Float64 *) ratePointer;
876 UInt32 dataSize = sizeof( Float64 );
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
878 kAudioObjectPropertyScopeGlobal,
879 kAudioObjectPropertyElementMaster };
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
881 return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
// Tear down an open CoreAudio stream: remove the xrun (processor-overload)
// property listeners, stop the device(s) if still running, destroy/remove the
// IOProcs, free the user and device buffers, destroy the pthread condition
// variable, and mark the stream CLOSED. Issues only WARNINGs on listener
// removal failure so teardown always completes.
1403 void RtApiCore :: closeStream( void )
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side (handle->id[0]): present for OUTPUT and DUPLEX streams.
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// The initializer selector (kAudioHardwarePropertyDevices) is a placeholder;
// the fields actually used are overwritten just below.
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
// Undo the xrunListener registration made in probeDeviceOpen.
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
// Use the 10.5+ IOProcID API when available; the AddIOProc/RemoveIOProc
// pair is the deprecated pre-10.5 fallback.
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side (handle->id[1]): needed for INPUT streams, and for DUPLEX only
// when the input device differs from the output device (same device shares
// one IOProc/listener, already removed above).
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers (index 0 = output, 1 = input).
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
// NOTE(review): the deletion of the CoreHandle itself is not visible in this
// view — confirm `handle` is freed before apiHandle is cleared.
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
// Start an opened stream: call AudioDeviceStart() on the output device
// (handle->id[0]) and/or the input device (handle->id[1]), reset the drain
// bookkeeping, and move the state to STREAM_RUNNING. On any system error the
// accumulated errorText_ is raised as a SYSTEM_ERROR at the end.
1479 void RtApiCore :: startStream( void )
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Begins invoking callbackHandler (registered in probeDeviceOpen) on the
// output device's IO thread.
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
// Input device is started separately only when it is a different physical
// device; a same-device DUPLEX stream uses the single IOProc started above.
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
// Reset drain state so the callback does not think a stop is in progress.
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream gracefully. For output, if no drain is already in
// progress (drainCounter == 0), request a drain (drainCounter = 2) and block
// on the condition variable until callbackEvent() signals that the output has
// been flushed; only then stop the device(s). Errors accumulate in errorText_
// and are raised as SYSTEM_ERROR at the end.
1520 void RtApiCore :: stopStream( void )
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1533 if ( handle->drainCounter == 0 ) {
// drainCounter = 2 tells callbackEvent() to write zeros and count up;
// once it exceeds 3 the callback signals this condition variable.
1534 handle->drainCounter = 2;
// NOTE(review): stream_.mutex must be held here for pthread_cond_wait();
// the lock/unlock lines are not visible in this view — verify in context.
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
// Stop the input device only when it is distinct from the output device.
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream: set drainCounter = 2 so the next callbackEvent()
// pass writes zeros instead of user data, then (in code past this view,
// presumably) defer to stopStream() to halt the devices — verify in context.
1563 void RtApiCore :: abortStream( void )
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
// Thread entry point: unpack the RtApiCore object from the CallbackInfo and
// invoke its stopStream() off the CoreAudio IO thread.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
// Terminate this helper thread; it exists only to run stopStream().
1589 pthread_exit( NULL );
// Per-device IO handler, invoked from the CoreAudio IOProc (callbackHandler)
// for each render/capture cycle.
//
// Responsibilities, in order:
//   1. Finish a drain: once drainCounter > 3, either spawn coreStopStream()
//      (callback-initiated stop) or signal the condition variable that
//      stopStream() is waiting on.
//   2. Run the user callback to refill userBuffer[0] / consume userBuffer[1],
//      reporting any accumulated xrun flags as status bits.
//   3. Copy/convert user output data into the CoreAudio output buffer list,
//      handling the single-stream, mono (non-interleaved multi-stream), and
//      interleaved multi-stream layouts.
//   4. Copy/convert captured input from the CoreAudio input buffer list into
//      the user input buffer with the same three layout cases.
//
// deviceId identifies which device this call is for (output vs. input side of
// a duplex stream on two devices). Returns SUCCESS/FAILURE.
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
1611 if ( handle->internalDrain == true )
// Stop must happen off this IO thread — hand it to a helper thread.
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
// Report and clear any xruns recorded by xrunListener since the last cycle.
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort immediately; 1 = stop after draining output.
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
// ---- Output side: fill the CoreAudio output buffers. ----
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
// When a format/channel conversion is needed, stage through deviceBuffer.
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
// One single-channel CoreAudio stream per user channel; copy each
// channel's contiguous slice (bufferSize samples) into its stream.
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
// inOffset = distance between consecutive channels of one frame in the
// source: 1 if interleaved, bufferSize (planar) otherwise.
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
// Frame-by-frame interleave copy into this stream's buffer; the inner
// index j walks this stream's channels.
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
1750 if ( handle->drainCounter ) {
// Counting past 3 triggers the drain-finished branch on a later cycle.
1751 handle->drainCounter++;
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
// ---- Input side: gather capture data into the user input buffer. ----
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
// Mirror of the output interleave logic above, with direction reversed.
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
// Final conversion from the staged device buffer into the user format.
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream-time accounting by one buffer's worth of frames.
1848 RtApi::tickStreamTime();
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
1908 //    jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
1915 //    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
// implementation. One instance is allocated per open stream and stored
// in stream_.apiHandle.
// NOTE(review): the "struct JackHandle {" header and the xrun[2] member
// are not visible in this elided listing; xrun is referenced by the
// constructor below and by jackXrun().
1935   jack_client_t *client;
1936   jack_port_t **ports[2];
1937   std::string deviceName[2];
1939   pthread_cond_t condition;
1940   int drainCounter; // Tracks callback counts when draining
1941   bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default constructor: no client yet, no ports allocated, no xruns.
1944     :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
1947 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed via jack_set_error_function() to
// suppress JACK's internal stderr chatter in non-debug builds.
1948 static void jackSilentError( const char * ) {};
// Constructor: autoconnect to physical ports by default; optionally
// silence JACK's internal error reporting (see jackSilentError above).
1951 RtApiJack :: RtApiJack()
1952   :shouldAutoconnect_(true) {
1953   // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955   // Turn off Jack's internal error reporting.
1956   jack_set_error_function( &jackSilentError );
// Destructor: ensure any open stream is closed before the object dies.
1960 RtApiJack :: ~RtApiJack()
1962   if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices": distinct client-name prefixes (text before the
// first ':') among all registered ports. Returns 0 when no JACK server
// is reachable (JackNoStartServer prevents auto-spawning one).
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967   // See if we can become a jack client.
1968   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969   jack_status_t *status = NULL;
1970   jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971   if ( client == 0 ) return 0;
1974   std::string port, previousPort;
1975   unsigned int nChannels = 0, nDevices = 0;
1976   ports = jack_get_ports( client, NULL, NULL, 0 );
1978     // Parse the port names up to the first colon (:).
1981       port = (char *) ports[ nChannels ];
1982       iColon = port.find(":");
1983       if ( iColon != std::string::npos ) {
// Keep the colon here so consecutive identical prefixes compare equal.
1984         port = port.substr( 0, iColon + 1 );
// A new prefix means a new "device" (JACK client).
1985         if ( port != previousPort ) {
1987           previousPort = port;
1990     } while ( ports[++nChannels] );
// NOTE(review): the elided lines presumably free the ports array and
// return nDevices -- confirm against the canonical source.
1994   jack_client_close( client );
// Probe device <device> (the device-th distinct client-name prefix) and
// fill a DeviceInfo: name, channel counts, the single JACK server sample
// rate, and default-device flags. Opens a temporary client for probing.
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000   RtAudio::DeviceInfo info;
2001   info.probed = false;
2003   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004   jack_status_t *status = NULL;
2005   jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006   if ( client == 0 ) {
2007     errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008     error( RtAudioError::WARNING );
2013   std::string port, previousPort;
2014   unsigned int nPorts = 0, nDevices = 0;
2015   ports = jack_get_ports( client, NULL, NULL, 0 );
2017     // Parse the port names up to the first colon (:).
2020       port = (char *) ports[ nPorts ];
2021       iColon = port.find(":");
2022       if ( iColon != std::string::npos ) {
2023         port = port.substr( 0, iColon );
2024         if ( port != previousPort ) {
// The device-th distinct prefix is the requested device's name.
2025           if ( nDevices == device ) info.name = port;
2027           previousPort = port;
2030     } while ( ports[++nPorts] );
2034   if ( device >= nDevices ) {
2035     jack_client_close( client );
2036     errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037     error( RtAudioError::INVALID_USE );
2041   // Get the current jack server sample rate.
2042   info.sampleRates.clear();
// JACK runs at exactly one rate, fixed at server start.
2044   info.preferredSampleRate = jack_get_sample_rate( client );
2045   info.sampleRates.push_back( info.preferredSampleRate );
2047   // Count the available ports containing the client name as device
2048   // channels.  Jack "input ports" equal RtAudio output channels.
2049   unsigned int nChannels = 0;
2050   ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2052     while ( ports[ nChannels ] ) nChannels++;
2054     info.outputChannels = nChannels;
2057   // Jack "output ports" equal RtAudio input channels.
2059   ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2061     while ( ports[ nChannels ] ) nChannels++;
2063     info.inputChannels = nChannels;
2066   if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067     jack_client_close(client);
2068     errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069     error( RtAudioError::WARNING );
2073   // If device opens for both playback and capture, we determine the channels.
2074   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077   // Jack always uses 32-bit floats.
2078   info.nativeFormats = RTAUDIO_FLOAT32;
2080   // Jack doesn't provide default devices so we'll use the first available one.
2081   if ( device == 0 && info.outputChannels > 0 )
2082     info.isDefaultOutput = true;
2083   if ( device == 0 && info.inputChannels > 0 )
2084     info.isDefaultInput = true;
2086   jack_client_close(client);
// JACK process callback (registered via jack_set_process_callback).
// Delegates to RtApiJack::callbackEvent(); a false return there maps to
// a nonzero return here, which tells JACK to remove this client.
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093   CallbackInfo *info = (CallbackInfo *) infoPointer;
2095   RtApiJack *object = (RtApiJack *) info->object;
2096   if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down.  It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
2105 static void *jackCloseStream( void *ptr )
2107   CallbackInfo *info = (CallbackInfo *) ptr;
2108   RtApiJack *object = (RtApiJack *) info->object;
2110   object->closeStream();
2112   pthread_exit( NULL );
// JACK shutdown callback (registered via jack_on_shutdown). Spawns a
// detachable helper thread to close the stream, because closeStream()
// cannot be called directly from this callback (see jackCloseStream).
2114 static void jackShutdown( void *infoPointer )
2116   CallbackInfo *info = (CallbackInfo *) infoPointer;
2117   RtApiJack *object = (RtApiJack *) info->object;
2119   // Check current stream state.  If stopped, then we'll assume this
2120   // was called as a result of a call to RtApiJack::stopStream (the
2121   // deactivation of a client handle causes this function to be called).
2122   // If not, we'll assume the Jack server is shutting down or some
2123   // other problem occurred and we should close the stream.
2124   if ( object->isStreamRunning() == false ) return;
2126   ThreadHandle threadId;
2127   pthread_create( &threadId, NULL, jackCloseStream, info );
2128   std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: latch under/overflow flags on the stream handle so
// the next callbackEvent() can report them to the user callback.
2131 static int jackXrun( void *infoPointer )
2133   JackHandle *handle = *((JackHandle **) infoPointer);
2135   if ( handle->ports[0] ) handle->xrun[0] = true;
2136   if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a JACK stream on <device>.
// Verifies channel availability and the server sample rate, allocates
// the JackHandle plus user/device buffers, registers JACK ports, and
// installs the process/xrun/shutdown callbacks. Returns SUCCESS/FAILURE.
// For DUPLEX the function is called twice; the second (INPUT) call
// reuses the client created by the first.
// NOTE(review): many error-exit lines (goto/cleanup labels) are elided
// from this listing -- the trailing cleanup block starting at "error:"
// appears to begin around the pthread_cond_destroy below.
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142 unsigned int firstChannel, unsigned int sampleRate,
2143 RtAudioFormat format, unsigned int *bufferSize,
2144 RtAudio::StreamOptions *options )
2146   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148   // Look for jack server and try to become a client (only do once per stream).
2149   jack_client_t *client = 0;
2150   if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151     jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152     jack_status_t *status = NULL;
2153     if ( options && !options->streamName.empty() )
2154       client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156       client = jack_client_open( "RtApiJack", jackoptions, status );
2157     if ( client == 0 ) {
2158       errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159       error( RtAudioError::WARNING );
2164     // The handle must have been created on an earlier pass.
2165     client = handle->client;
2169   std::string port, previousPort, deviceName;
2170   unsigned int nPorts = 0, nDevices = 0;
2171   ports = jack_get_ports( client, NULL, NULL, 0 );
2173     // Parse the port names up to the first colon (:).
2176       port = (char *) ports[ nPorts ];
2177       iColon = port.find(":");
2178       if ( iColon != std::string::npos ) {
2179         port = port.substr( 0, iColon );
2180         if ( port != previousPort ) {
// Same prefix-scan as getDeviceInfo(): the device-th distinct client
// name becomes our target device name.
2181           if ( nDevices == device ) deviceName = port;
2183           previousPort = port;
2186     } while ( ports[++nPorts] );
2190   if ( device >= nDevices ) {
2191     errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2195   // Count the available ports containing the client name as device
2196   // channels.  Jack "input ports" equal RtAudio output channels.
2197   unsigned int nChannels = 0;
2198   unsigned long flag = JackPortIsInput;
2199   if ( mode == INPUT ) flag = JackPortIsOutput;
2200   ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2202     while ( ports[ nChannels ] ) nChannels++;
2206   // Compare the jack ports for specified client to the requested number of channels.
2207   if ( nChannels < (channels + firstChannel) ) {
2208     errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2209     errorText_ = errorStream_.str();
2213   // Check the jack server sample rate.
2214   unsigned int jackRate = jack_get_sample_rate( client );
// JACK cannot resample; the requested rate must match the server rate.
2215   if ( sampleRate != jackRate ) {
2216     jack_client_close( client );
2217     errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2218     errorText_ = errorStream_.str();
2221   stream_.sampleRate = jackRate;
2223   // Get the latency of the JACK port.
2224   ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2225   if ( ports[ firstChannel ] ) {
2227     jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2228     // the range (usually the min and max are equal)
2229     jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2230     // get the latency range
2231     jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2232     // be optimistic, use the min!
2233     stream_.latency[mode] = latrange.min;
2234     //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2238   // The jack server always uses 32-bit floating-point data.
2239   stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2240   stream_.userFormat = format;
2242   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2243   else stream_.userInterleaved = true;
2245   // Jack always uses non-interleaved buffers.
2246   stream_.deviceInterleaved[mode] = false;
2248   // Jack always provides host byte-ordered data.
2249   stream_.doByteSwap[mode] = false;
2251   // Get the buffer size.  The buffer size and number of buffers
2252   // (periods) is set when the jack server is started.
2253   stream_.bufferSize = (int) jack_get_buffer_size( client );
2254   *bufferSize = stream_.bufferSize;
2256   stream_.nDeviceChannels[mode] = channels;
2257   stream_.nUserChannels[mode] = channels;
2259   // Set flags for buffer conversion.
2260   stream_.doConvertBuffer[mode] = false;
// Conversion is needed when the user format differs from float32 or
// when a multi-channel user buffer is interleaved (JACK's never is).
2261   if ( stream_.userFormat != stream_.deviceFormat[mode] )
2262     stream_.doConvertBuffer[mode] = true;
2263   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2264        stream_.nUserChannels[mode] > 1 )
2265     stream_.doConvertBuffer[mode] = true;
2267   // Allocate our JackHandle structure for the stream.
2268   if ( handle == 0 ) {
2270       handle = new JackHandle;
2272     catch ( std::bad_alloc& ) {
2273       errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2277     if ( pthread_cond_init(&handle->condition, NULL) ) {
2278       errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2281     stream_.apiHandle = (void *) handle;
2282     handle->client = client;
2284   handle->deviceName[mode] = deviceName;
2286   // Allocate necessary internal buffers.
2287   unsigned long bufferBytes;
2288   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2289   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2290   if ( stream_.userBuffer[mode] == NULL ) {
2291     errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2295   if ( stream_.doConvertBuffer[mode] ) {
2297     bool makeBuffer = true;
2298     if ( mode == OUTPUT )
2299       bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2300     else { // mode == INPUT
2301       bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
// For duplex, reuse the (larger) output-side device buffer if it fits.
2302       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2303         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2304         if ( bufferBytes < bytesOut ) makeBuffer = false;
2309       bufferBytes *= *bufferSize;
2310       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2311       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2312       if ( stream_.deviceBuffer == NULL ) {
2313         errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2319   // Allocate memory for the Jack ports (channels) identifiers.
2320   handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2321   if ( handle->ports[mode] == NULL )  {
2322     errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2326   stream_.device[mode] = device;
2327   stream_.channelOffset[mode] = firstChannel;
2328   stream_.state = STREAM_STOPPED;
2329   stream_.callbackInfo.object = (void *) this;
2331   if ( stream_.mode == OUTPUT && mode == INPUT )
2332     // We had already set up the stream for output.
2333     stream_.mode = DUPLEX;
2335     stream_.mode = mode;
// Install the JACK callbacks only on the first (stream-creating) pass.
2336     jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2337     jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2338     jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2341   // Register our ports.
2343   if ( mode == OUTPUT ) {
2344     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2345       snprintf( label, 64, "outport %d", i );
2346       handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2347                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2351     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2352       snprintf( label, 64, "inport %d", i );
2353       handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2354                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2358   // Setup the buffer conversion information structure.  We don't use
2359   // buffers to do channel offsets, so we override that parameter
2361   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2363   if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// ---- error cleanup path (reached via elided goto on failure) ----
2369     pthread_cond_destroy( &handle->condition );
2370     jack_client_close( handle->client );
2372     if ( handle->ports[0] ) free( handle->ports[0] );
2373     if ( handle->ports[1] ) free( handle->ports[1] );
2376     stream_.apiHandle = 0;
2379   for ( int i=0; i<2; i++ ) {
2380     if ( stream_.userBuffer[i] ) {
2381       free( stream_.userBuffer[i] );
2382       stream_.userBuffer[i] = 0;
2386   if ( stream_.deviceBuffer ) {
2387     free( stream_.deviceBuffer );
2388     stream_.deviceBuffer = 0;
// Close the stream: deactivate (if running) and close the JACK client,
// then release the JackHandle, port arrays, and all internal buffers.
2394 void RtApiJack :: closeStream( void )
2396   if ( stream_.state == STREAM_CLOSED ) {
2397     errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2398     error( RtAudioError::WARNING );
2402   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2405     if ( stream_.state == STREAM_RUNNING )
2406       jack_deactivate( handle->client );
2408     jack_client_close( handle->client );
2412     if ( handle->ports[0] ) free( handle->ports[0] );
2413     if ( handle->ports[1] ) free( handle->ports[1] );
2414     pthread_cond_destroy( &handle->condition );
2416     stream_.apiHandle = 0;
2419   for ( int i=0; i<2; i++ ) {
2420     if ( stream_.userBuffer[i] ) {
2421       free( stream_.userBuffer[i] );
2422       stream_.userBuffer[i] = 0;
2426   if ( stream_.deviceBuffer ) {
2427     free( stream_.deviceBuffer );
2428     stream_.deviceBuffer = 0;
2431   stream_.mode = UNINITIALIZED;
2432   stream_.state = STREAM_CLOSED;
// Activate the JACK client and (unless RTAUDIO_JACK_DONT_CONNECT was
// requested) autoconnect our registered ports to the device's ports,
// honoring the channel offset chosen at open time.
2435 void RtApiJack :: startStream( void )
2438   if ( stream_.state == STREAM_RUNNING ) {
2439     errorText_ = "RtApiJack::startStream(): the stream is already running!";
2440     error( RtAudioError::WARNING );
2444   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2445   int result = jack_activate( handle->client );
2447     errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2453   // Get the list of available ports.
2454   if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2456     ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2457     if ( ports == NULL) {
2458       errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2462     // Now make the port connections.  Since RtAudio wasn't designed to
2463     // allow the user to select particular channels of a device, we'll
2464     // just open the first "nChannels" ports with offset.
2465     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2467       if ( ports[ stream_.channelOffset[0] + i ] )
2468         result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2471         errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2478   if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2480     ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2481     if ( ports == NULL) {
2482       errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2486     // Now make the port connections.  See note above.
2487     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2489       if ( ports[ stream_.channelOffset[1] + i ] )
2490         result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2493         errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2500   handle->drainCounter = 0;
2501   handle->internalDrain = false;
2502   stream_.state = STREAM_RUNNING;
// Fall-through cleanup: only raise SYSTEM_ERROR if something failed.
2505   if ( result == 0 ) return;
2506   error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: if output is active, initiate draining
// (drainCounter = 2 requests zero-fill) and block on the handle's
// condition variable until the callback signals completion, then
// deactivate the JACK client.
2509 void RtApiJack :: stopStream( void )
2512   if ( stream_.state == STREAM_STOPPED ) {
2513     errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2514     error( RtAudioError::WARNING );
2518   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2519   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2521     if ( handle->drainCounter == 0 ) {
2522       handle->drainCounter = 2;
2523       pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2527   jack_deactivate( handle->client );
2528   stream_.state = STREAM_STOPPED;
// Abort the stream: set drainCounter so the callback zero-fills output
// immediately; elided lines presumably delegate to stopStream().
2531 void RtApiJack :: abortStream( void )
2534   if ( stream_.state == STREAM_STOPPED ) {
2535     errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2536     error( RtAudioError::WARNING );
2540   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2541   handle->drainCounter = 2;
2546 // This function will be called by a spawned thread when the user
2547 // callback function signals that the stream should be stopped or
2548 // aborted.  It is necessary to handle it this way because the
2549 // callbackEvent() function must return before the jack_deactivate()
2550 // function will return.
2551 static void *jackStopStream( void *ptr )
2553   CallbackInfo *info = (CallbackInfo *) ptr;
2554   RtApiJack *object = (RtApiJack *) info->object;
2556   object->stopStream();
2557   pthread_exit( NULL );
// Per-block JACK processing: invoke the user callback, then move audio
// between the user/device buffers and the JACK port buffers (with format
// conversion when needed) and handle drain/stop signaling.
// NOTE(review): the error strings below say "RtApiCore::callbackEvent()"
// inside RtApiJack -- a copy/paste slip. They are runtime strings, so
// they are only flagged here, not changed.
2560 bool RtApiJack :: callbackEvent( unsigned long nframes )
2562   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2563   if ( stream_.state == STREAM_CLOSED ) {
2564     errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2565     error( RtAudioError::WARNING );
2568   if ( stream_.bufferSize != nframes ) {
2569     errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2570     error( RtAudioError::WARNING );
2574   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2575   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2577   // Check if we were draining the stream and signal is finished.
2578   if ( handle->drainCounter > 3 ) {
2579     ThreadHandle threadId;
2581     stream_.state = STREAM_STOPPING;
2582     if ( handle->internalDrain == true )
// Callback-initiated stop: stopStream() must run on another thread.
2583       pthread_create( &threadId, NULL, jackStopStream, info );
// User-initiated stop: wake the thread blocked in stopStream().
2585       pthread_cond_signal( &handle->condition );
2589   // Invoke user callback first, to get fresh output data.
2590   if ( handle->drainCounter == 0 ) {
2591     RtAudioCallback callback = (RtAudioCallback) info->callback;
2592     double streamTime = getStreamTime();
2593     RtAudioStreamStatus status = 0;
2594     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2595       status |= RTAUDIO_OUTPUT_UNDERFLOW;
2596       handle->xrun[0] = false;
2598     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2599       status |= RTAUDIO_INPUT_OVERFLOW;
2600       handle->xrun[1] = false;
2602     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2603                                   stream_.bufferSize, streamTime, status, info->userData );
// Return 2 = abort immediately; 1 = drain output then stop.
2604     if ( cbReturnValue == 2 ) {
2605       stream_.state = STREAM_STOPPING;
2606       handle->drainCounter = 2;
2608       pthread_create( &id, NULL, jackStopStream, info );
2611     else if ( cbReturnValue == 1 ) {
2612       handle->drainCounter = 1;
2613       handle->internalDrain = true;
2617   jack_default_audio_sample_t *jackbuffer;
2618   unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2619   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2621     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2623       for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2624         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2625         memset( jackbuffer, 0, bufferBytes );
2629     else if ( stream_.doConvertBuffer[0] ) {
2631       convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2633       for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2634         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2635         memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2638     else { // no buffer conversion
2639       for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2640         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2641         memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2646   // Don't bother draining input
2647   if ( handle->drainCounter ) {
2648     handle->drainCounter++;
2652   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2654     if ( stream_.doConvertBuffer[1] ) {
2655       for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2656         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2657         memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2659       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2661     else { // no buffer conversion
2662       for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2663         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2664         memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
// Advance the stream time by one buffer's duration.
2670   RtApi::tickStreamTime();
2673 //******************** End of __UNIX_JACK__ *********************//
2676 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2678 // The ASIO API is designed around a callback scheme, so this
2679 // implementation is similar to that used for OS-X CoreAudio and Linux
2680 // Jack. The primary constraint with ASIO is that it only allows
2681 // access to a single driver at a time. Thus, it is not possible to
2682 // have more than one simultaneous RtAudio stream.
2684 // This implementation also requires a number of external ASIO files
2685 // and a few global variables. The ASIO callback scheme does not
2686 // allow for the passing of user data, so we must create a global
2687 // pointer to our callbackInfo structure.
2689 // On unix systems, we make use of a pthread condition variable.
2690 // Since there is no equivalent in Windows, I hacked something based
2691 // on information found in
2692 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2694 #include "asiosys.h"
2696 #include "iasiothiscallresolver.h"
2697 #include "asiodrivers.h"
// ASIO allows only one driver at a time and its callbacks carry no user
// data, so driver/callback state must live in file-scope globals.
2700 static AsioDrivers drivers;
2701 static ASIOCallbacks asioCallbacks;
2702 static ASIODriverInfo driverInfo;
2703 static CallbackInfo *asioCallbackInfo;
2704 static bool asioXRun;
// Per-stream ASIO handle (enclosing "struct AsioHandle {" is elided
// from this listing).
2707   int drainCounter;       // Tracks callback counts when draining
2708   bool internalDrain;     // Indicates if stop is initiated from callback or not.
2709   ASIOBufferInfo *bufferInfos;
2713     :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2716 // Function declarations (definitions at end of section)
2717 static const char* getAsioErrorString( ASIOError result );
2718 static void sampleRateChanged( ASIOSampleRate sRate );
2719 static long asioMessages( long selector, long value, void* message, double* opt );
2721 RtApiAsio :: RtApiAsio()
2723   // ASIO cannot run on a multi-threaded apartment. You can call
2724   // CoInitialize beforehand, but it must be for apartment threading
2725   // (in which case, CoInitialize will return S_FALSE here).
2726   coInitialized_ = false;
2727   HRESULT hr = CoInitialize( NULL );
2729     errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2730     error( RtAudioError::WARNING );
2732     coInitialized_ = true;
// Start from a clean driver state; set the ASIO version we target.
2734   drivers.removeCurrentDriver();
2735   driverInfo.asioVersion = 2;
2737   // See note in DirectSound implementation about GetDesktopWindow().
2738   driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then balance the CoInitialize
// performed in the constructor.
2741 RtApiAsio :: ~RtApiAsio()
2743   if ( stream_.state != STREAM_CLOSED ) closeStream();
2744   if ( coInitialized_ ) CoUninitialize();
// Number of installed ASIO drivers, as reported by the driver list.
2747 unsigned int RtApiAsio :: getDeviceCount( void )
2749   return (unsigned int) drivers.asioGetNumDev();
// Probe ASIO driver <device>: load it, query channel counts, supported
// sample rates, and the native data format, then unload it. If a stream
// is already open (only one ASIO driver can be loaded at a time), return
// the info cached by saveDeviceInfo() instead of re-probing.
2752 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2754   RtAudio::DeviceInfo info;
2755   info.probed = false;
2758   unsigned int nDevices = getDeviceCount();
2759   if ( nDevices == 0 ) {
2760     errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2761     error( RtAudioError::INVALID_USE );
2765   if ( device >= nDevices ) {
2766     errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2767     error( RtAudioError::INVALID_USE );
2771   // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
2772   if ( stream_.state != STREAM_CLOSED ) {
2773     if ( device >= devices_.size() ) {
2774       errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2775       error( RtAudioError::WARNING );
2778     return devices_[ device ];
2781   char driverName[32];
2782   ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2783   if ( result != ASE_OK ) {
2784     errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2785     errorText_ = errorStream_.str();
2786     error( RtAudioError::WARNING );
2790   info.name = driverName;
2792   if ( !drivers.loadDriver( driverName ) ) {
2793     errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2794     errorText_ = errorStream_.str();
2795     error( RtAudioError::WARNING );
2799   result = ASIOInit( &driverInfo );
2800   if ( result != ASE_OK ) {
2801     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2802     errorText_ = errorStream_.str();
2803     error( RtAudioError::WARNING );
2807   // Determine the device channel information.
2808   long inputChannels, outputChannels;
2809   result = ASIOGetChannels( &inputChannels, &outputChannels );
2810   if ( result != ASE_OK ) {
2811     drivers.removeCurrentDriver();
2812     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2813     errorText_ = errorStream_.str();
2814     error( RtAudioError::WARNING );
2818   info.outputChannels = outputChannels;
2819   info.inputChannels = inputChannels;
2820   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2821     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2823   // Determine the supported sample rates.
2824   info.sampleRates.clear();
2825   for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2826     result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2827     if ( result == ASE_OK ) {
2828       info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: highest supported rate that does not exceed 48 kHz.
2830       if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2831         info.preferredSampleRate = SAMPLE_RATES[i];
2835   // Determine supported data types ... just check first channel and assume rest are the same.
2836   ASIOChannelInfo channelInfo;
2837   channelInfo.channel = 0;
2838   channelInfo.isInput = true;
2839   if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2840   result = ASIOGetChannelInfo( &channelInfo );
2841   if ( result != ASE_OK ) {
2842     drivers.removeCurrentDriver();
2843     errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2844     errorText_ = errorStream_.str();
2845     error( RtAudioError::WARNING );
// Map the ASIO sample type (both endiannesses) to an RtAudio format flag.
2849   info.nativeFormats = 0;
2850   if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2851     info.nativeFormats |= RTAUDIO_SINT16;
2852   else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2853     info.nativeFormats |= RTAUDIO_SINT32;
2854   else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2855     info.nativeFormats |= RTAUDIO_FLOAT32;
2856   else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2857     info.nativeFormats |= RTAUDIO_FLOAT64;
2858   else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2859     info.nativeFormats |= RTAUDIO_SINT24;
2861   if ( info.outputChannels > 0 )
2862     if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2863   if ( info.inputChannels > 0 )
2864     if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver before returning.
2867   drivers.removeCurrentDriver();
2871 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2873 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2874 object->callbackEvent( index );
2877 void RtApiAsio :: saveDeviceInfo( void )
2881 unsigned int nDevices = getDeviceCount();
2882 devices_.resize( nDevices );
2883 for ( unsigned int i=0; i<nDevices; i++ )
2884 devices_[i] = getDeviceInfo( i );
2887 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2888 unsigned int firstChannel, unsigned int sampleRate,
2889 RtAudioFormat format, unsigned int *bufferSize,
2890 RtAudio::StreamOptions *options )
2891 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2893 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2895 // For ASIO, a duplex stream MUST use the same driver.
2896 if ( isDuplexInput && stream_.device[0] != device ) {
2897 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2901 char driverName[32];
2902 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2903 if ( result != ASE_OK ) {
2904 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2905 errorText_ = errorStream_.str();
2909 // Only load the driver once for duplex stream.
2910 if ( !isDuplexInput ) {
2911 // The getDeviceInfo() function will not work when a stream is open
2912 // because ASIO does not allow multiple devices to run at the same
2913 // time. Thus, we'll probe the system before opening a stream and
2914 // save the results for use by getDeviceInfo().
2915 this->saveDeviceInfo();
2917 if ( !drivers.loadDriver( driverName ) ) {
2918 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2919 errorText_ = errorStream_.str();
2923 result = ASIOInit( &driverInfo );
2924 if ( result != ASE_OK ) {
2925 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2926 errorText_ = errorStream_.str();
2931 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2932 bool buffersAllocated = false;
2933 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2934 unsigned int nChannels;
2937 // Check the device channel count.
2938 long inputChannels, outputChannels;
2939 result = ASIOGetChannels( &inputChannels, &outputChannels );
2940 if ( result != ASE_OK ) {
2941 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2942 errorText_ = errorStream_.str();
2946 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2947 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2948 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2949 errorText_ = errorStream_.str();
2952 stream_.nDeviceChannels[mode] = channels;
2953 stream_.nUserChannels[mode] = channels;
2954 stream_.channelOffset[mode] = firstChannel;
2956 // Verify the sample rate is supported.
2957 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2958 if ( result != ASE_OK ) {
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2960 errorText_ = errorStream_.str();
2964 // Get the current sample rate
2965 ASIOSampleRate currentRate;
2966 result = ASIOGetSampleRate( ¤tRate );
2967 if ( result != ASE_OK ) {
2968 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2969 errorText_ = errorStream_.str();
2973 // Set the sample rate only if necessary
2974 if ( currentRate != sampleRate ) {
2975 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2976 if ( result != ASE_OK ) {
2977 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2978 errorText_ = errorStream_.str();
2983 // Determine the driver data type.
2984 ASIOChannelInfo channelInfo;
2985 channelInfo.channel = 0;
2986 if ( mode == OUTPUT ) channelInfo.isInput = false;
2987 else channelInfo.isInput = true;
2988 result = ASIOGetChannelInfo( &channelInfo );
2989 if ( result != ASE_OK ) {
2990 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2991 errorText_ = errorStream_.str();
2995 // Assuming WINDOWS host is always little-endian.
2996 stream_.doByteSwap[mode] = false;
2997 stream_.userFormat = format;
2998 stream_.deviceFormat[mode] = 0;
2999 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3000 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3001 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3003 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3004 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3005 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3007 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3008 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3009 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3011 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3012 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3013 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3015 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3016 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3017 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3020 if ( stream_.deviceFormat[mode] == 0 ) {
3021 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3022 errorText_ = errorStream_.str();
3026 // Set the buffer size. For a duplex stream, this will end up
3027 // setting the buffer size based on the input constraints, which
3029 long minSize, maxSize, preferSize, granularity;
3030 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3031 if ( result != ASE_OK ) {
3032 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3033 errorText_ = errorStream_.str();
3037 if ( isDuplexInput ) {
3038 // When this is the duplex input (output was opened before), then we have to use the same
3039 // buffersize as the output, because it might use the preferred buffer size, which most
3040 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3041 // So instead of throwing an error, make them equal. The caller uses the reference
3042 // to the "bufferSize" param as usual to set up processing buffers.
3044 *bufferSize = stream_.bufferSize;
3047 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3048 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3049 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3050 else if ( granularity == -1 ) {
3051 // Make sure bufferSize is a power of two.
3052 int log2_of_min_size = 0;
3053 int log2_of_max_size = 0;
3055 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3056 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3057 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3060 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3061 int min_delta_num = log2_of_min_size;
3063 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3064 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3065 if (current_delta < min_delta) {
3066 min_delta = current_delta;
3071 *bufferSize = ( (unsigned int)1 << min_delta_num );
3072 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3073 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3075 else if ( granularity != 0 ) {
3076 // Set to an even multiple of granularity, rounding up.
3077 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3082 // we don't use it anymore, see above!
3083 // Just left it here for the case...
3084 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3085 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3090 stream_.bufferSize = *bufferSize;
3091 stream_.nBuffers = 2;
3093 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3094 else stream_.userInterleaved = true;
3096 // ASIO always uses non-interleaved buffers.
3097 stream_.deviceInterleaved[mode] = false;
3099 // Allocate, if necessary, our AsioHandle structure for the stream.
3100 if ( handle == 0 ) {
3102 handle = new AsioHandle;
3104 catch ( std::bad_alloc& ) {
3105 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3108 handle->bufferInfos = 0;
3110 // Create a manual-reset event.
3111 handle->condition = CreateEvent( NULL, // no security
3112 TRUE, // manual-reset
3113 FALSE, // non-signaled initially
3115 stream_.apiHandle = (void *) handle;
3118 // Create the ASIO internal buffers. Since RtAudio sets up input
3119 // and output separately, we'll have to dispose of previously
3120 // created output buffers for a duplex stream.
3121 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3122 ASIODisposeBuffers();
3123 if ( handle->bufferInfos ) free( handle->bufferInfos );
3126 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3128 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3129 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3130 if ( handle->bufferInfos == NULL ) {
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3132 errorText_ = errorStream_.str();
3136 ASIOBufferInfo *infos;
3137 infos = handle->bufferInfos;
3138 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3139 infos->isInput = ASIOFalse;
3140 infos->channelNum = i + stream_.channelOffset[0];
3141 infos->buffers[0] = infos->buffers[1] = 0;
3143 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3144 infos->isInput = ASIOTrue;
3145 infos->channelNum = i + stream_.channelOffset[1];
3146 infos->buffers[0] = infos->buffers[1] = 0;
3149 // prepare for callbacks
3150 stream_.sampleRate = sampleRate;
3151 stream_.device[mode] = device;
3152 stream_.mode = isDuplexInput ? DUPLEX : mode;
3154 // store this class instance before registering callbacks, that are going to use it
3155 asioCallbackInfo = &stream_.callbackInfo;
3156 stream_.callbackInfo.object = (void *) this;
3158 // Set up the ASIO callback structure and create the ASIO data buffers.
3159 asioCallbacks.bufferSwitch = &bufferSwitch;
3160 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3161 asioCallbacks.asioMessage = &asioMessages;
3162 asioCallbacks.bufferSwitchTimeInfo = NULL;
3163 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3164 if ( result != ASE_OK ) {
3165 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3166 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3167 // in that case, let's be naïve and try that instead
3168 *bufferSize = preferSize;
3169 stream_.bufferSize = *bufferSize;
3170 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3173 if ( result != ASE_OK ) {
3174 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3175 errorText_ = errorStream_.str();
3178 buffersAllocated = true;
3179 stream_.state = STREAM_STOPPED;
3181 // Set flags for buffer conversion.
3182 stream_.doConvertBuffer[mode] = false;
3183 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3184 stream_.doConvertBuffer[mode] = true;
3185 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3186 stream_.nUserChannels[mode] > 1 )
3187 stream_.doConvertBuffer[mode] = true;
3189 // Allocate necessary internal buffers
3190 unsigned long bufferBytes;
3191 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3192 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3193 if ( stream_.userBuffer[mode] == NULL ) {
3194 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3198 if ( stream_.doConvertBuffer[mode] ) {
3200 bool makeBuffer = true;
3201 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3202 if ( isDuplexInput && stream_.deviceBuffer ) {
3203 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3204 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3208 bufferBytes *= *bufferSize;
3209 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3210 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3211 if ( stream_.deviceBuffer == NULL ) {
3212 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3218 // Determine device latencies
3219 long inputLatency, outputLatency;
3220 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3221 if ( result != ASE_OK ) {
3222 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3223 errorText_ = errorStream_.str();
3224 error( RtAudioError::WARNING); // warn but don't fail
3227 stream_.latency[0] = outputLatency;
3228 stream_.latency[1] = inputLatency;
3231 // Setup the buffer conversion information structure. We don't use
3232 // buffers to do channel offsets, so we override that parameter
3234 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3239 if ( !isDuplexInput ) {
3240 // the cleanup for error in the duplex input, is done by RtApi::openStream
3241 // So we clean up for single channel only
3243 if ( buffersAllocated )
3244 ASIODisposeBuffers();
3246 drivers.removeCurrentDriver();
3249 CloseHandle( handle->condition );
3250 if ( handle->bufferInfos )
3251 free( handle->bufferInfos );
3254 stream_.apiHandle = 0;
3258 if ( stream_.userBuffer[mode] ) {
3259 free( stream_.userBuffer[mode] );
3260 stream_.userBuffer[mode] = 0;
3263 if ( stream_.deviceBuffer ) {
3264 free( stream_.deviceBuffer );
3265 stream_.deviceBuffer = 0;
3270 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3272 void RtApiAsio :: closeStream()
3274 if ( stream_.state == STREAM_CLOSED ) {
3275 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3276 error( RtAudioError::WARNING );
3280 if ( stream_.state == STREAM_RUNNING ) {
3281 stream_.state = STREAM_STOPPED;
3284 ASIODisposeBuffers();
3285 drivers.removeCurrentDriver();
3287 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3289 CloseHandle( handle->condition );
3290 if ( handle->bufferInfos )
3291 free( handle->bufferInfos );
3293 stream_.apiHandle = 0;
3296 for ( int i=0; i<2; i++ ) {
3297 if ( stream_.userBuffer[i] ) {
3298 free( stream_.userBuffer[i] );
3299 stream_.userBuffer[i] = 0;
3303 if ( stream_.deviceBuffer ) {
3304 free( stream_.deviceBuffer );
3305 stream_.deviceBuffer = 0;
3308 stream_.mode = UNINITIALIZED;
3309 stream_.state = STREAM_CLOSED;
// NOTE(review): file-scope flag cleared in startStream() below; its setter is
// not visible in this chunk -- presumably set when a stream stop has been
// requested from the callback path. Confirm before relying on it.
3312 bool stopThreadCalled = false;
3314 void RtApiAsio :: startStream()
// Begin audio streaming on the open ASIO stream: warns (no-op) if already
// running, otherwise calls ASIOStart(), resets the drain bookkeeping in the
// AsioHandle, and marks the stream RUNNING.  Reports SYSTEM_ERROR through
// error() if ASIOStart() fails.
3317 if ( stream_.state == STREAM_RUNNING ) {
3318 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3319 error( RtAudioError::WARNING );
3323 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3324 ASIOError result = ASIOStart();
3325 if ( result != ASE_OK ) {
3326 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3327 errorText_ = errorStream_.str();
// Reset drain state and the manual-reset event that stopStream() waits on,
// then mark the stream running.
3331 handle->drainCounter = 0;
3332 handle->internalDrain = false;
3333 ResetEvent( handle->condition );
3334 stream_.state = STREAM_RUNNING;
3338 stopThreadCalled = false;
// Success returns here; otherwise fall through and report the ASIO failure.
3340 if ( result == ASE_OK ) return;
3341 error( RtAudioError::SYSTEM_ERROR );
3344 void RtApiAsio :: stopStream()
// Stop the running stream.  For output/duplex streams the callback is first
// asked to drain the output (drainCounter = 2) and this thread blocks on the
// manual-reset condition event until callbackEvent() signals completion;
// then ASIOStop() halts the driver.
3347 if ( stream_.state == STREAM_STOPPED ) {
3348 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3349 error( RtAudioError::WARNING );
3353 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3354 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Only initiate a drain if one is not already in progress.
3355 if ( handle->drainCounter == 0 ) {
3356 handle->drainCounter = 2;
3357 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3361 stream_.state = STREAM_STOPPED;
3363 ASIOError result = ASIOStop();
3364 if ( result != ASE_OK ) {
3365 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3366 errorText_ = errorStream_.str();
// Success returns here; otherwise report the ASIO failure.
3369 if ( result == ASE_OK ) return;
3370 error( RtAudioError::SYSTEM_ERROR );
3373 void RtApiAsio :: abortStream()
// Abort the stream.  As the retained comments below explain, for the ASIO
// backend this is intentionally equivalent to stopStream().
3376 if ( stream_.state == STREAM_STOPPED ) {
3377 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3378 error( RtAudioError::WARNING );
3382 // The following lines were commented-out because some behavior was
3383 // noted where the device buffers need to be zeroed to avoid
3384 // continuing sound, even when the device buffers are completely
3385 // disposed. So now, calling abort is the same as calling stop.
3386 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3387 // handle->drainCounter = 2;
3391 // This function will be called by a spawned thread when the user
3392 // callback function signals that the stream should be stopped or
3393 // aborted. It is necessary to handle it this way because the
3394 // callbackEvent() function must return before the ASIOStop()
3395 // function will return.
3396 static unsigned __stdcall asioStopStream( void *ptr )
3398 CallbackInfo *info = (CallbackInfo *) ptr;
3399 RtApiAsio *object = (RtApiAsio *) info->object;
3401 object->stopStream();
3406 bool RtApiAsio :: callbackEvent( long bufferIndex )
// Per-buffer handler invoked from bufferSwitch() for each half of the ASIO
// double buffer (bufferIndex selects the half).  Runs the user callback,
// converts/byte-swaps between user and driver buffers, and drives the
// output-drain state machine used by stopStream().
3408 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3409 if ( stream_.state == STREAM_CLOSED ) {
3410 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3411 error( RtAudioError::WARNING );
3415 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3416 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3418 // Check if we were draining the stream and signal if finished.
3419 if ( handle->drainCounter > 3 ) {
3421 stream_.state = STREAM_STOPPING;
3422 if ( handle->internalDrain == false )
// External drain (stopStream() is blocked on the condition event): wake it.
3423 SetEvent( handle->condition );
// Internal drain: ASIOStop() cannot be called from this callback, so a
// dedicated thread performs the stop.
3424 else { // spawn a thread to stop the stream
3426 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3427 &stream_.callbackInfo, 0, &threadId );
3432 // Invoke user callback to get fresh output data UNLESS we are
3434 if ( handle->drainCounter == 0 ) {
3435 RtAudioCallback callback = (RtAudioCallback) info->callback;
3436 double streamTime = getStreamTime();
3437 RtAudioStreamStatus status = 0;
// Report any xrun flagged by the driver (see asioMessages/asioXRun).
3438 if ( stream_.mode != INPUT && asioXRun == true ) {
3439 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3442 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3443 status |= RTAUDIO_INPUT_OVERFLOW;
3446 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3447 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort: begin draining immediately via a stop thread.
3448 if ( cbReturnValue == 2 ) {
3449 stream_.state = STREAM_STOPPING;
3450 handle->drainCounter = 2;
3452 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3453 &stream_.callbackInfo, 0, &threadId );
// Callback return 1 = stop after the remaining output has drained.
3456 else if ( cbReturnValue == 1 ) {
3457 handle->drainCounter = 1;
3458 handle->internalDrain = true;
// Move data between the user buffer(s) and the per-channel, non-interleaved
// ASIO device buffers for this buffer half.
3462 unsigned int nChannels, bufferBytes, i, j;
3463 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3464 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3466 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3468 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3470 for ( i=0, j=0; i<nChannels; i++ ) {
3471 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3472 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Conversion required: convert into deviceBuffer, then scatter per channel.
3476 else if ( stream_.doConvertBuffer[0] ) {
3478 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3479 if ( stream_.doByteSwap[0] )
3480 byteSwapBuffer( stream_.deviceBuffer,
3481 stream_.bufferSize * stream_.nDeviceChannels[0],
3482 stream_.deviceFormat[0] );
3484 for ( i=0, j=0; i<nChannels; i++ ) {
3485 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3486 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3487 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion: copy straight from the user buffer.
3493 if ( stream_.doByteSwap[0] )
3494 byteSwapBuffer( stream_.userBuffer[0],
3495 stream_.bufferSize * stream_.nUserChannels[0],
3496 stream_.userFormat );
3498 for ( i=0, j=0; i<nChannels; i++ ) {
3499 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3500 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3501 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3507 // Don't bother draining input
3508 if ( handle->drainCounter ) {
3509 handle->drainCounter++;
3513 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3515 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3517 if (stream_.doConvertBuffer[1]) {
3519 // Always interleave ASIO input data.
3520 for ( i=0, j=0; i<nChannels; i++ ) {
3521 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3522 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3523 handle->bufferInfos[i].buffers[bufferIndex],
3527 if ( stream_.doByteSwap[1] )
3528 byteSwapBuffer( stream_.deviceBuffer,
3529 stream_.bufferSize * stream_.nDeviceChannels[1],
3530 stream_.deviceFormat[1] );
3531 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion: gather driver channels directly into the user buffer.
3535 for ( i=0, j=0; i<nChannels; i++ ) {
3536 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3537 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3538 handle->bufferInfos[i].buffers[bufferIndex],
3543 if ( stream_.doByteSwap[1] )
3544 byteSwapBuffer( stream_.userBuffer[1],
3545 stream_.bufferSize * stream_.nUserChannels[1],
3546 stream_.userFormat );
3551 // The following call was suggested by Malte Clasen. While the API
3552 // documentation indicates it should not be required, some device
3553 // drivers apparently do not function correctly without it.
3556 RtApi::tickStreamTime();
3560 static void sampleRateChanged( ASIOSampleRate sRate )
3562 // The ASIO documentation says that this usually only happens during
3563 // external sync. Audio processing is not stopped by the driver,
3564 // actual sample rate might not have even changed, maybe only the
3565 // sample rate status of an AES/EBU or S/PDIF digital input at the
3568 RtApi *object = (RtApi *) asioCallbackInfo->object;
3570 object->stopStream();
3572 catch ( RtAudioError &exception ) {
3573 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3577 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3580 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
// Host message callback registered with the driver through asioCallbacks.
// Answers the driver's capability queries and reacts to its reset, resync
// and latency-change notifications.
3584 switch( selector ) {
3585 case kAsioSelectorSupported:
// Capability query: report which of the selectors below we handle.
3586 if ( value == kAsioResetRequest
3587 || value == kAsioEngineVersion
3588 || value == kAsioResyncRequest
3589 || value == kAsioLatenciesChanged
3590 // The following three were added for ASIO 2.0, you don't
3591 // necessarily have to support them.
3592 || value == kAsioSupportsTimeInfo
3593 || value == kAsioSupportsTimeCode
3594 || value == kAsioSupportsInputMonitor)
3597 case kAsioResetRequest:
3598 // Defer the task and perform the reset of the driver during the
3599 // next "safe" situation. You cannot reset the driver right now,
3600 // as this code is called from the driver. Reset the driver is
3601 // done by completely destruct is. I.e. ASIOStop(),
3602 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3604 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3607 case kAsioResyncRequest:
3608 // This informs the application that the driver encountered some
3609 // non-fatal data loss. It is used for synchronization purposes
3610 // of different media. Added mainly to work around the Win16Mutex
3611 // problems in Windows 95/98 with the Windows Multimedia system,
3612 // which could lose data because the Mutex was held too long by
3613 // another thread. However a driver can issue it in other
3615 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3619 case kAsioLatenciesChanged:
3620 // This will inform the host application that the drivers were
3621 // latencies changed. Beware, it this does not mean that the
3622 // buffer sizes have changed! You might need to update internal
3624 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3627 case kAsioEngineVersion:
3628 // Return the supported ASIO version of the host application. If
3629 // a host application does not implement this selector, ASIO 1.0
3630 // is assumed by the driver.
3633 case kAsioSupportsTimeInfo:
3634 // Informs the driver whether the
3635 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3636 // For compatibility with ASIO 1.0 drivers the host application
3637 // should always support the "old" bufferSwitch method, too.
3640 case kAsioSupportsTimeCode:
3641 // Informs the driver whether application is interested in time
3642 // code info. If an application does not need to know about time
3643 // code, the driver has less work to do.
3650 static const char* getAsioErrorString( ASIOError result )
3658 static const Messages m[] =
3660 { ASE_NotPresent, "Hardware input or output is not present or available." },
3661 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3662 { ASE_InvalidParameter, "Invalid input parameter." },
3663 { ASE_InvalidMode, "Invalid mode." },
3664 { ASE_SPNotAdvancing, "Sample position not advancing." },
3665 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3666 { ASE_NoMemory, "Not enough memory to complete the request." }
3669 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3670 if ( m[i].value == result ) return m[i].message;
3672 return "Unknown error.";
3675 //******************** End of __WINDOWS_ASIO__ *********************//
3679 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3681 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3682 // - Introduces support for the Windows WASAPI API
3683 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3684 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3685 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3690 #include <audioclient.h>
3692 #include <mmdeviceapi.h>
3693 #include <functiondiscoverykeys_devpkey.h>
3696 //=============================================================================
// Release a COM interface pointer; the macro body continues on the
// backslash-continued lines below.
3698 #define SAFE_RELEASE( objectPtr )\
3701 objectPtr->Release();\
3705 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex ); // NOTE(review): matches AvSetMmThreadCharacteristicsW -- presumably resolved from avrt.dll at runtime; confirm at the call site.
3707 //-----------------------------------------------------------------------------
3709 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3710 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3711 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3712 // provide intermediate storage for read / write synchronization.
3726 // sets the length of the internal ring buffer
3727 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3730 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3732 bufferSize_ = bufferSize;
3737 // attempt to push a buffer into the ring buffer at the current "in" index
3738 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3740 if ( !buffer || // incoming buffer is NULL
3741 bufferSize == 0 || // incoming buffer has no data
3742 bufferSize > bufferSize_ ) // incoming buffer too large
3747 unsigned int relOutIndex = outIndex_;
3748 unsigned int inIndexEnd = inIndex_ + bufferSize;
3749 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3750 relOutIndex += bufferSize_;
3753 // "in" index can end on the "out" index but cannot begin at it
3754 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3755 return false; // not enough space between "in" index and "out" index
3758 // copy buffer from external to internal
3759 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3760 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3761 int fromInSize = bufferSize - fromZeroSize;
3766 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3767 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3769 case RTAUDIO_SINT16:
3770 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3771 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3773 case RTAUDIO_SINT24:
3774 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3775 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3777 case RTAUDIO_SINT32:
3778 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3779 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3781 case RTAUDIO_FLOAT32:
3782 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3783 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3785 case RTAUDIO_FLOAT64:
3786 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3787 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3791 // update "in" index
3792 inIndex_ += bufferSize;
3793 inIndex_ %= bufferSize_;
3798 // attempt to pull a buffer from the ring buffer from the current "out" index
// Copies bufferSize elements of the given sample format out of the internal
// ring buffer into 'buffer', handling wrap-around with a two-part memcpy.
// Returns false when the arguments are invalid or insufficient data is queued.
3799 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3801 if ( !buffer || // incoming buffer is NULL
3802 bufferSize == 0 || // incoming buffer has no data
3803 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the availability comparison below is linear even
// when the requested span crosses the end of the ring.
3808 unsigned int relInIndex = inIndex_;
3809 unsigned int outIndexEnd = outIndex_ + bufferSize;
3810 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3811 relInIndex += bufferSize_;
3814 // "out" index can begin at and end on the "in" index
3815 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3816 return false; // not enough space between "out" index and "in" index
3819 // copy buffer from internal to external
// fromZeroSize: elements that wrap past the end of the ring (0 if none);
// fromOutSize: elements copied starting at outIndex_ before the wrap.
3820 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3821 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3822 int fromOutSize = bufferSize - fromZeroSize;
// NOTE(review): the switch( format ) header, the RTAUDIO_SINT8 case label and
// the break statements are not present in this capture — confirm upstream.
3827 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3828 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3830 case RTAUDIO_SINT16:
3831 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3832 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3834 case RTAUDIO_SINT24:
3835 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3836 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3838 case RTAUDIO_SINT32:
3839 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3840 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3842 case RTAUDIO_FLOAT32:
3843 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3844 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3846 case RTAUDIO_FLOAT64:
3847 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3848 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3852 // update "out" index
// Advance and wrap the read position past the consumed elements.
3853 outIndex_ += bufferSize;
3854 outIndex_ %= bufferSize_;
3861 unsigned int bufferSize_;
3862 unsigned int inIndex_;
3863 unsigned int outIndex_;
3866 //-----------------------------------------------------------------------------
3868 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI state: capture/render IAudioClient instances, their
// buffer-service interfaces, and the event handles used for event-driven
// (AUDCLNT_STREAMFLAGS_EVENTCALLBACK-style) buffering. All members are
// zero-initialized by the default constructor below.
3871 IAudioClient* captureAudioClient;
3872 IAudioClient* renderAudioClient;
3873 IAudioCaptureClient* captureClient;
3874 IAudioRenderClient* renderClient;
3875 HANDLE captureEvent;
// NOTE(review): the struct header and the 'renderEvent' member declaration do
// not appear in this capture although the initializer list below references
// renderEvent — confirm against upstream.
3879 : captureAudioClient( NULL ),
3880 renderAudioClient( NULL ),
3881 captureClient( NULL ),
3882 renderClient( NULL ),
3883 captureEvent( NULL ),
3884 renderEvent( NULL ) {}
3887 //=============================================================================
3889 RtApiWasapi::RtApiWasapi()
3890 : coInitialized_( false ), deviceEnumerator_( NULL )
3892 // WASAPI can run either apartment or multi-threaded
3893 HRESULT hr = CoInitialize( NULL );
3894 if ( !FAILED( hr ) )
3895 coInitialized_ = true;
3897 // Instantiate device enumerator
3898 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3899 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3900 ( void** ) &deviceEnumerator_ );
3902 if ( FAILED( hr ) ) {
3903 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3904 error( RtAudioError::DRIVER_ERROR );
3908 //-----------------------------------------------------------------------------
3910 RtApiWasapi::~RtApiWasapi()
3912 if ( stream_.state != STREAM_CLOSED )
3915 SAFE_RELEASE( deviceEnumerator_ );
3917 // If this object previously called CoInitialize()
3918 if ( coInitialized_ )
3922 //=============================================================================
3924 unsigned int RtApiWasapi::getDeviceCount( void )
// Returns the total number of active WASAPI endpoints (capture + render).
// Device indices used elsewhere in this class span this combined count, with
// render devices first (see getDeviceInfo's 'device >= renderDeviceCount').
// On any COM failure the error text is set and the error path reports
// DRIVER_ERROR instead of returning the sum.
// NOTE(review): the 'goto Exit;' lines after each failure branch and the
// 'Exit:' label / closing braces are missing from this capture — the release
// calls below are assumed to run on both success and failure; confirm upstream.
3926 unsigned int captureDeviceCount = 0;
3927 unsigned int renderDeviceCount = 0;
3929 IMMDeviceCollection* captureDevices = NULL;
3930 IMMDeviceCollection* renderDevices = NULL;
3932 // Count capture devices
3934 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3935 if ( FAILED( hr ) ) {
3936 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
3940 hr = captureDevices->GetCount( &captureDeviceCount );
3941 if ( FAILED( hr ) ) {
3942 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
3946 // Count render devices
3947 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
3948 if ( FAILED( hr ) ) {
3949 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
3953 hr = renderDevices->GetCount( &renderDeviceCount );
3954 if ( FAILED( hr ) ) {
3955 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
3960 // release all references
3961 SAFE_RELEASE( captureDevices );
3962 SAFE_RELEASE( renderDevices );
// An empty errorText_ means every COM call above succeeded.
3964 if ( errorText_.empty() )
3965 return captureDeviceCount + renderDeviceCount;
3967 error( RtAudioError::DRIVER_ERROR );
3971 //-----------------------------------------------------------------------------
3973 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
// Probes one WASAPI endpoint and fills an RtAudio::DeviceInfo: friendly name,
// default-device flags, channel counts, the single shared-mode mix sample
// rate, and the native sample format derived from the mix format.
// Index convention: render devices occupy [0, renderDeviceCount), capture
// devices follow at [renderDeviceCount, renderDeviceCount+captureDeviceCount).
// NOTE(review): the 'goto Exit;' statements after each failure branch, the
// 'Exit:' label and several closing braces are missing from this capture —
// comments assume the standard unwind-to-cleanup structure; confirm upstream.
3975 RtAudio::DeviceInfo info;
3976 unsigned int captureDeviceCount = 0;
3977 unsigned int renderDeviceCount = 0;
3978 std::string defaultDeviceName;
3979 bool isCaptureDevice = false;
3981 PROPVARIANT deviceNameProp;
3982 PROPVARIANT defaultDeviceNameProp;
3984 IMMDeviceCollection* captureDevices = NULL;
3985 IMMDeviceCollection* renderDevices = NULL;
3986 IMMDevice* devicePtr = NULL;
3987 IMMDevice* defaultDevicePtr = NULL;
3988 IAudioClient* audioClient = NULL;
3989 IPropertyStore* devicePropStore = NULL;
3990 IPropertyStore* defaultDevicePropStore = NULL;
3992 WAVEFORMATEX* deviceFormat = NULL;
3993 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistic default: stays false unless the whole probe succeeds.
3996 info.probed = false;
3998 // Count capture devices
4000 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4001 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4002 if ( FAILED( hr ) ) {
4003 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4007 hr = captureDevices->GetCount( &captureDeviceCount );
4008 if ( FAILED( hr ) ) {
4009 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4013 // Count render devices
4014 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4015 if ( FAILED( hr ) ) {
4016 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4020 hr = renderDevices->GetCount( &renderDeviceCount );
4021 if ( FAILED( hr ) ) {
4022 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4026 // validate device index
4027 if ( device >= captureDeviceCount + renderDeviceCount ) {
4028 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4029 errorType = RtAudioError::INVALID_USE;
4033 // determine whether index falls within capture or render devices
4034 if ( device >= renderDeviceCount ) {
4035 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4036 if ( FAILED( hr ) ) {
4037 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4040 isCaptureDevice = true;
4043 hr = renderDevices->Item( device, &devicePtr );
4044 if ( FAILED( hr ) ) {
4045 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4048 isCaptureDevice = false;
4051 // get default device name
4052 if ( isCaptureDevice ) {
4053 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4054 if ( FAILED( hr ) ) {
4055 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4060 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4061 if ( FAILED( hr ) ) {
4062 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4067 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4068 if ( FAILED( hr ) ) {
4069 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4072 PropVariantInit( &defaultDeviceNameProp );
4074 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4075 if ( FAILED( hr ) ) {
4076 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4080 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4083 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4084 if ( FAILED( hr ) ) {
4085 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4089 PropVariantInit( &deviceNameProp );
4091 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4092 if ( FAILED( hr ) ) {
4093 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4097 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device detection is by friendly-name comparison, not endpoint ID —
// devices with identical names would all be flagged default.
4100 if ( isCaptureDevice ) {
4101 info.isDefaultInput = info.name == defaultDeviceName;
4102 info.isDefaultOutput = false;
4105 info.isDefaultInput = false;
4106 info.isDefaultOutput = info.name == defaultDeviceName;
4110 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4111 if ( FAILED( hr ) ) {
4112 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4116 hr = audioClient->GetMixFormat( &deviceFormat );
4117 if ( FAILED( hr ) ) {
4118 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4122 if ( isCaptureDevice ) {
4123 info.inputChannels = deviceFormat->nChannels;
4124 info.outputChannels = 0;
4125 info.duplexChannels = 0;
4128 info.inputChannels = 0;
4129 info.outputChannels = deviceFormat->nChannels;
4130 info.duplexChannels = 0;
4133 // sample rates (WASAPI only supports the one native sample rate)
4134 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4136 info.sampleRates.clear();
4137 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
// Map the shared-mode mix format (PCM or IEEE float, possibly wrapped in
// WAVE_FORMAT_EXTENSIBLE) onto the RtAudio format flags.
4140 info.nativeFormats = 0;
4142 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4143 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4144 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4146 if ( deviceFormat->wBitsPerSample == 32 ) {
4147 info.nativeFormats |= RTAUDIO_FLOAT32;
4149 else if ( deviceFormat->wBitsPerSample == 64 ) {
4150 info.nativeFormats |= RTAUDIO_FLOAT64;
4153 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4154 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4155 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4157 if ( deviceFormat->wBitsPerSample == 8 ) {
4158 info.nativeFormats |= RTAUDIO_SINT8;
4160 else if ( deviceFormat->wBitsPerSample == 16 ) {
4161 info.nativeFormats |= RTAUDIO_SINT16;
4163 else if ( deviceFormat->wBitsPerSample == 24 ) {
4164 info.nativeFormats |= RTAUDIO_SINT24;
4166 else if ( deviceFormat->wBitsPerSample == 32 ) {
4167 info.nativeFormats |= RTAUDIO_SINT32;
4175 // release all references
// Cleanup runs for both success and failure: PROPVARIANTs are cleared, all
// COM interfaces released, and the formats returned by GetMixFormat freed
// with CoTaskMemFree as the WASAPI contract requires.
4176 PropVariantClear( &deviceNameProp );
4177 PropVariantClear( &defaultDeviceNameProp );
4179 SAFE_RELEASE( captureDevices );
4180 SAFE_RELEASE( renderDevices );
4181 SAFE_RELEASE( devicePtr );
4182 SAFE_RELEASE( defaultDevicePtr );
4183 SAFE_RELEASE( audioClient );
4184 SAFE_RELEASE( devicePropStore );
4185 SAFE_RELEASE( defaultDevicePropStore );
4187 CoTaskMemFree( deviceFormat );
4188 CoTaskMemFree( closestMatchFormat );
4190 if ( !errorText_.empty() )
4195 //-----------------------------------------------------------------------------
4197 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4199 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4200 if ( getDeviceInfo( i ).isDefaultOutput ) {
4208 //-----------------------------------------------------------------------------
4210 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4212 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4213 if ( getDeviceInfo( i ).isDefaultInput ) {
4221 //-----------------------------------------------------------------------------
4223 void RtApiWasapi::closeStream( void )
// Tears down the open stream: releases the WASAPI client interfaces, closes
// the event handles, frees the API handle and user/device buffers, and marks
// the stream CLOSED. Issues a WARNING (not an error) if nothing is open.
4225 if ( stream_.state == STREAM_CLOSED ) {
4226 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4227 error( RtAudioError::WARNING );
4231 if ( stream_.state != STREAM_STOPPED )
// NOTE(review): the stopStream() call expected as the body of the condition
// above is missing from this capture — confirm against upstream.
4234 // clean up stream memory
4235 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4236 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4238 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4239 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are Win32 HANDLEs, not COM objects: CloseHandle, not Release.
4241 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4242 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4244 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4245 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4247 delete ( WasapiHandle* ) stream_.apiHandle;
4248 stream_.apiHandle = NULL;
// Free both user buffers (index 0 = OUTPUT, 1 = INPUT).
4250 for ( int i = 0; i < 2; i++ ) {
4251 if ( stream_.userBuffer[i] ) {
4252 free( stream_.userBuffer[i] );
4253 stream_.userBuffer[i] = 0;
4257 if ( stream_.deviceBuffer ) {
4258 free( stream_.deviceBuffer );
4259 stream_.deviceBuffer = 0;
4262 // update stream state
4263 stream_.state = STREAM_CLOSED;
4266 //-----------------------------------------------------------------------------
4268 void RtApiWasapi::startStream( void )
// Spawns the WASAPI processing thread for an open stream. The stream state is
// flipped to RUNNING *before* thread creation so the thread's processing loop
// sees a running stream as soon as it starts.
4272 if ( stream_.state == STREAM_RUNNING ) {
4273 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4274 error( RtAudioError::WARNING );
4278 // update stream state
4279 stream_.state = STREAM_RUNNING;
4281 // create WASAPI stream thread
// Created suspended so the priority can be applied before the first
// instruction of runWasapiThread executes.
4282 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4284 if ( !stream_.callbackInfo.thread ) {
4285 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4286 error( RtAudioError::THREAD_ERROR );
4289 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4290 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4294 //-----------------------------------------------------------------------------
4296 void RtApiWasapi::stopStream( void )
// Gracefully stops a running stream: signals the processing thread via the
// STOPPING state, waits for it to acknowledge, drains the last buffer, then
// stops the WASAPI clients and closes the thread handle.
4300 if ( stream_.state == STREAM_STOPPED ) {
4301 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4302 error( RtAudioError::WARNING );
4306 // inform stream thread by setting stream state to STREAM_STOPPING
4307 stream_.state = STREAM_STOPPING;
4309 // wait until stream thread is stopped
// Poll loop: the wasapiThread sets the state to STOPPED when it exits its
// processing loop. (Loop body not visible in this capture.)
4310 while( stream_.state != STREAM_STOPPED ) {
4314 // Wait for the last buffer to play before stopping.
// Sleep duration in ms = one callback buffer's worth of audio.
4315 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4317 // stop capture client if applicable
4318 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4319 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4320 if ( FAILED( hr ) ) {
4321 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4322 error( RtAudioError::DRIVER_ERROR );
4327 // stop render client if applicable
4328 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4329 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4330 if ( FAILED( hr ) ) {
4331 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4332 error( RtAudioError::DRIVER_ERROR );
4337 // close thread handle
4338 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4339 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4340 error( RtAudioError::THREAD_ERROR );
4344 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4347 //-----------------------------------------------------------------------------
4349 void RtApiWasapi::abortStream( void )
// Immediately stops a running stream. Identical to stopStream() except it
// does NOT sleep to drain the final buffer before stopping the clients.
4353 if ( stream_.state == STREAM_STOPPED ) {
4354 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4355 error( RtAudioError::WARNING );
4359 // inform stream thread by setting stream state to STREAM_STOPPING
4360 stream_.state = STREAM_STOPPING;
4362 // wait until stream thread is stopped
// Poll until the wasapiThread acknowledges by setting STREAM_STOPPED.
4363 while ( stream_.state != STREAM_STOPPED ) {
4367 // stop capture client if applicable
4368 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4369 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4370 if ( FAILED( hr ) ) {
4371 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4372 error( RtAudioError::DRIVER_ERROR );
4377 // stop render client if applicable
4378 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4379 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4380 if ( FAILED( hr ) ) {
4381 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4382 error( RtAudioError::DRIVER_ERROR );
4387 // close thread handle
4388 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4389 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4390 error( RtAudioError::THREAD_ERROR );
4394 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4397 //-----------------------------------------------------------------------------
4399 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4400 unsigned int firstChannel, unsigned int sampleRate,
4401 RtAudioFormat format, unsigned int* bufferSize,
4402 RtAudio::StreamOptions* options )
// Validates the requested device/mode/sample-rate, activates the matching
// IAudioClient into the WasapiHandle, and fills the stream_ structure
// (channels, formats, conversion flags, user buffer). Returns SUCCESS or
// FAILURE. WASAPI shared mode accepts only the device's mix sample rate.
// NOTE(review): the 'goto Exit;'/'Exit:' unwind lines and several closing
// braces are missing from this capture — comments assume the standard
// error-unwind structure; confirm upstream.
4404 bool methodResult = FAILURE;
4405 unsigned int captureDeviceCount = 0;
4406 unsigned int renderDeviceCount = 0;
4408 IMMDeviceCollection* captureDevices = NULL;
4409 IMMDeviceCollection* renderDevices = NULL;
4410 IMMDevice* devicePtr = NULL;
4411 WAVEFORMATEX* deviceFormat = NULL;
4412 unsigned int bufferBytes;
4413 stream_.state = STREAM_STOPPED;
4414 RtAudio::DeviceInfo deviceInfo;
4416 // create API Handle if not already created
// One WasapiHandle is shared by both directions of a duplex stream.
4417 if ( !stream_.apiHandle )
4418 stream_.apiHandle = ( void* ) new WasapiHandle();
4420 // Count capture devices
4422 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4423 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4424 if ( FAILED( hr ) ) {
4425 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4429 hr = captureDevices->GetCount( &captureDeviceCount );
4430 if ( FAILED( hr ) ) {
4431 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4435 // Count render devices
4436 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4437 if ( FAILED( hr ) ) {
4438 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4442 hr = renderDevices->GetCount( &renderDeviceCount );
4443 if ( FAILED( hr ) ) {
4444 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4448 // validate device index
4449 if ( device >= captureDeviceCount + renderDeviceCount ) {
4450 errorType = RtAudioError::INVALID_USE;
4451 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4455 deviceInfo = getDeviceInfo( device );
4457 // validate sample rate
// Shared-mode WASAPI streams must run at the device's mix rate.
4458 if ( sampleRate != deviceInfo.preferredSampleRate )
4460 errorType = RtAudioError::INVALID_USE;
4461 std::stringstream ss;
4462 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4463 << "Hz sample rate not supported. This device only supports "
4464 << deviceInfo.preferredSampleRate << "Hz.";
4465 errorText_ = ss.str();
4469 // determine whether index falls within capture or render devices
// Indices >= renderDeviceCount select capture endpoints (see getDeviceInfo).
4470 if ( device >= renderDeviceCount ) {
4471 if ( mode != INPUT ) {
4472 errorType = RtAudioError::INVALID_USE;
4473 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4477 // retrieve captureAudioClient from devicePtr
4478 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4480 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4481 if ( FAILED( hr ) ) {
4482 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4486 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4487 NULL, ( void** ) &captureAudioClient );
4488 if ( FAILED( hr ) ) {
4489 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4493 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4494 if ( FAILED( hr ) ) {
4495 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4499 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4500 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4503 if ( mode != OUTPUT ) {
4504 errorType = RtAudioError::INVALID_USE;
4505 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4509 // retrieve renderAudioClient from devicePtr
4510 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4512 hr = renderDevices->Item( device, &devicePtr );
4513 if ( FAILED( hr ) ) {
4514 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4518 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4519 NULL, ( void** ) &renderAudioClient );
4520 if ( FAILED( hr ) ) {
4521 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4525 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4526 if ( FAILED( hr ) ) {
4527 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4531 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4532 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// A second probeDeviceOpen for the opposite direction upgrades to DUPLEX.
4536 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4537 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4538 stream_.mode = DUPLEX;
4541 stream_.mode = mode;
4544 stream_.device[mode] = device;
4545 stream_.doByteSwap[mode] = false;
4546 stream_.sampleRate = sampleRate;
4547 stream_.bufferSize = *bufferSize;
4548 stream_.nBuffers = 1;
4549 stream_.nUserChannels[mode] = channels;
4550 stream_.channelOffset[mode] = firstChannel;
4551 stream_.userFormat = format;
4552 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4554 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4555 stream_.userInterleaved = false;
4557 stream_.userInterleaved = true;
4558 stream_.deviceInterleaved[mode] = true;
4560 // Set flags for buffer conversion.
4561 stream_.doConvertBuffer[mode] = false;
// NOTE(review): 'stream_.nUserChannels != stream_.nDeviceChannels' compares
// array names (i.e. pointers) if these members are arrays, which would make
// the condition constant — likely intended as a per-element/per-mode
// comparison; confirm against upstream.
4562 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4563 stream_.nUserChannels != stream_.nDeviceChannels )
4564 stream_.doConvertBuffer[mode] = true;
4565 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4566 stream_.nUserChannels[mode] > 1 )
4567 stream_.doConvertBuffer[mode] = true;
4569 if ( stream_.doConvertBuffer[mode] )
4570 setConvertInfo( mode, 0 );
4572 // Allocate necessary internal buffers
4573 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4575 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4576 if ( !stream_.userBuffer[mode] ) {
4577 errorType = RtAudioError::MEMORY_ERROR;
4578 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// Priority 15 corresponds to the value later passed to SetThreadPriority in
// startStream when the caller requests realtime scheduling.
4582 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4583 stream_.callbackInfo.priority = 15;
4585 stream_.callbackInfo.priority = 0;
4587 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4588 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4590 methodResult = SUCCESS;
// Cleanup path: release enumeration references; the IAudioClients themselves
// stay owned by the WasapiHandle.
4594 SAFE_RELEASE( captureDevices );
4595 SAFE_RELEASE( renderDevices );
4596 SAFE_RELEASE( devicePtr );
4597 CoTaskMemFree( deviceFormat );
4599 // if method failed, close the stream
4600 if ( methodResult == FAILURE )
4603 if ( !errorText_.empty() )
4605 return methodResult;
4608 //=============================================================================
4610 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
// Static thread entry point passed to CreateThread in startStream():
// dispatches to the wasapiThread() member of the RtApiWasapi instance
// supplied as the thread argument.
4613 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4618 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
// Static trampoline: invokes stopStream() on the RtApiWasapi instance passed
// as the thread argument (allows stopping from a separately spawned thread).
4621 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4626 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
// Static trampoline: invokes abortStream() on the RtApiWasapi instance passed
// as the thread argument.
4629 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4634 //-----------------------------------------------------------------------------
4636 void RtApiWasapi::wasapiThread()
4638 // as this is a new thread, we must CoInitialize it
4639 CoInitialize( NULL );
4643 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4644 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4645 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4646 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4647 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4648 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4650 WAVEFORMATEX* captureFormat = NULL;
4651 WAVEFORMATEX* renderFormat = NULL;
4652 WasapiBuffer captureBuffer;
4653 WasapiBuffer renderBuffer;
4655 // declare local stream variables
4656 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4657 BYTE* streamBuffer = NULL;
4658 unsigned long captureFlags = 0;
4659 unsigned int bufferFrameCount = 0;
4660 unsigned int numFramesPadding = 0;
4661 bool callbackPushed = false;
4662 bool callbackPulled = false;
4663 bool callbackStopped = false;
4664 int callbackResult = 0;
4666 unsigned int deviceBuffSize = 0;
4669 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4671 // Attempt to assign "Pro Audio" characteristic to thread
4672 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4674 DWORD taskIndex = 0;
4675 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4676 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4677 FreeLibrary( AvrtDll );
4680 // start capture stream if applicable
4681 if ( captureAudioClient ) {
4682 hr = captureAudioClient->GetMixFormat( &captureFormat );
4683 if ( FAILED( hr ) ) {
4684 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4688 // initialize capture stream according to desire buffer size
4689 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4691 if ( !captureClient ) {
4692 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4693 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4694 desiredBufferPeriod,
4695 desiredBufferPeriod,
4698 if ( FAILED( hr ) ) {
4699 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4703 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4704 ( void** ) &captureClient );
4705 if ( FAILED( hr ) ) {
4706 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4710 // configure captureEvent to trigger on every available capture buffer
4711 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4712 if ( !captureEvent ) {
4713 errorType = RtAudioError::SYSTEM_ERROR;
4714 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4718 hr = captureAudioClient->SetEventHandle( captureEvent );
4719 if ( FAILED( hr ) ) {
4720 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4724 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4725 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4728 unsigned int inBufferSize = 0;
4729 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4730 if ( FAILED( hr ) ) {
4731 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4735 // scale outBufferSize according to stream->user sample rate ratio
4736 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4737 inBufferSize *= stream_.nDeviceChannels[INPUT];
4739 // set captureBuffer size
4740 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4742 // reset the capture stream
4743 hr = captureAudioClient->Reset();
4744 if ( FAILED( hr ) ) {
4745 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4749 // start the capture stream
4750 hr = captureAudioClient->Start();
4751 if ( FAILED( hr ) ) {
4752 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4757 // start render stream if applicable
4758 if ( renderAudioClient ) {
4759 hr = renderAudioClient->GetMixFormat( &renderFormat );
4760 if ( FAILED( hr ) ) {
4761 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4765 // initialize render stream according to desire buffer size
4766 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4768 if ( !renderClient ) {
4769 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4770 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4771 desiredBufferPeriod,
4772 desiredBufferPeriod,
4775 if ( FAILED( hr ) ) {
4776 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4780 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4781 ( void** ) &renderClient );
4782 if ( FAILED( hr ) ) {
4783 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4787 // configure renderEvent to trigger on every available render buffer
4788 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4789 if ( !renderEvent ) {
4790 errorType = RtAudioError::SYSTEM_ERROR;
4791 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4795 hr = renderAudioClient->SetEventHandle( renderEvent );
4796 if ( FAILED( hr ) ) {
4797 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4801 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4802 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4805 unsigned int outBufferSize = 0;
4806 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4807 if ( FAILED( hr ) ) {
4808 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4812 // scale inBufferSize according to user->stream sample rate ratio
4813 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4814 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4816 // set renderBuffer size
4817 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4819 // reset the render stream
4820 hr = renderAudioClient->Reset();
4821 if ( FAILED( hr ) ) {
4822 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4826 // start the render stream
4827 hr = renderAudioClient->Start();
4828 if ( FAILED( hr ) ) {
4829 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4834 if ( stream_.mode == INPUT ) {
4835 using namespace std; // for roundf
4836 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4838 else if ( stream_.mode == OUTPUT ) {
4839 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4841 else if ( stream_.mode == DUPLEX ) {
4842 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4843 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4846 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4847 if ( !stream_.deviceBuffer ) {
4848 errorType = RtAudioError::MEMORY_ERROR;
4849 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4853 // stream process loop
4854 while ( stream_.state != STREAM_STOPPING ) {
4855 if ( !callbackPulled ) {
4858 // 1. Pull callback buffer from inputBuffer
4859 // 2. If 1. was successful: Convert callback buffer to user format
4861 if ( captureAudioClient ) {
4862 // Pull callback buffer from inputBuffer
4863 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4864 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4865 stream_.deviceFormat[INPUT] );
4867 if ( callbackPulled ) {
4868 if ( stream_.doConvertBuffer[INPUT] ) {
4869 // Convert callback buffer to user format
4870 convertBuffer( stream_.userBuffer[INPUT],
4871 stream_.deviceBuffer,
4872 stream_.convertInfo[INPUT] );
4875 // no further conversion, simple copy deviceBuffer to userBuffer
4876 memcpy( stream_.userBuffer[INPUT],
4877 stream_.deviceBuffer,
4878 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4883 // if there is no capture stream, set callbackPulled flag
4884 callbackPulled = true;
4889 // 1. Execute user callback method
4890 // 2. Handle return value from callback
4892 // if callback has not requested the stream to stop
4893 if ( callbackPulled && !callbackStopped ) {
4894 // Execute user callback method
4895 callbackResult = callback( stream_.userBuffer[OUTPUT],
4896 stream_.userBuffer[INPUT],
4899 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4900 stream_.callbackInfo.userData );
4902 // Handle return value from callback
4903 if ( callbackResult == 1 ) {
4904 // instantiate a thread to stop this thread
4905 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4906 if ( !threadHandle ) {
4907 errorType = RtAudioError::THREAD_ERROR;
4908 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4911 else if ( !CloseHandle( threadHandle ) ) {
4912 errorType = RtAudioError::THREAD_ERROR;
4913 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4917 callbackStopped = true;
4919 else if ( callbackResult == 2 ) {
4920 // instantiate a thread to stop this thread
4921 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4922 if ( !threadHandle ) {
4923 errorType = RtAudioError::THREAD_ERROR;
4924 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4927 else if ( !CloseHandle( threadHandle ) ) {
4928 errorType = RtAudioError::THREAD_ERROR;
4929 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4933 callbackStopped = true;
4940 // 1. Convert callback buffer to stream format
4941 // 2. Push callback buffer into outputBuffer
4943 if ( renderAudioClient && callbackPulled ) {
4944 if ( stream_.doConvertBuffer[OUTPUT] ) {
4945 // Convert callback buffer to stream format
4946 convertBuffer( stream_.deviceBuffer,
4947 stream_.userBuffer[OUTPUT],
4948 stream_.convertInfo[OUTPUT] );
4952 // Push callback buffer into outputBuffer
4953 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
4954 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
4955 stream_.deviceFormat[OUTPUT] );
4958 // if there is no render stream, set callbackPushed flag
4959 callbackPushed = true;
4964 // 1. Get capture buffer from stream
4965 // 2. Push capture buffer into inputBuffer
4966 // 3. If 2. was successful: Release capture buffer
4968 if ( captureAudioClient ) {
4969 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
4970 if ( !callbackPulled ) {
4971 WaitForSingleObject( captureEvent, INFINITE );
4974 // Get capture buffer from stream
4975 hr = captureClient->GetBuffer( &streamBuffer,
4977 &captureFlags, NULL, NULL );
4978 if ( FAILED( hr ) ) {
4979 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
4983 if ( bufferFrameCount != 0 ) {
4984 // Push capture buffer into inputBuffer
4985 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
4986 bufferFrameCount * stream_.nDeviceChannels[INPUT],
4987 stream_.deviceFormat[INPUT] ) )
4989 // Release capture buffer
4990 hr = captureClient->ReleaseBuffer( bufferFrameCount );
4991 if ( FAILED( hr ) ) {
4992 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
4998 // Inform WASAPI that capture was unsuccessful
4999 hr = captureClient->ReleaseBuffer( 0 );
5000 if ( FAILED( hr ) ) {
5001 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5008 // Inform WASAPI that capture was unsuccessful
5009 hr = captureClient->ReleaseBuffer( 0 );
5010 if ( FAILED( hr ) ) {
5011 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5019 // 1. Get render buffer from stream
5020 // 2. Pull next buffer from outputBuffer
5021 // 3. If 2. was successful: Fill render buffer with next buffer
5022 // Release render buffer
5024 if ( renderAudioClient ) {
5025 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5026 if ( callbackPulled && !callbackPushed ) {
5027 WaitForSingleObject( renderEvent, INFINITE );
5030 // Get render buffer from stream
5031 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5032 if ( FAILED( hr ) ) {
5033 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5037 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5038 if ( FAILED( hr ) ) {
5039 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5043 bufferFrameCount -= numFramesPadding;
5045 if ( bufferFrameCount != 0 ) {
5046 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5047 if ( FAILED( hr ) ) {
5048 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5052 // Pull next buffer from outputBuffer
5053 // Fill render buffer with next buffer
5054 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5055 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5056 stream_.deviceFormat[OUTPUT] ) )
5058 // Release render buffer
5059 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5060 if ( FAILED( hr ) ) {
5061 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5067 // Inform WASAPI that render was unsuccessful
5068 hr = renderClient->ReleaseBuffer( 0, 0 );
5069 if ( FAILED( hr ) ) {
5070 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5077 // Inform WASAPI that render was unsuccessful
5078 hr = renderClient->ReleaseBuffer( 0, 0 );
5079 if ( FAILED( hr ) ) {
5080 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5086 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5087 if ( callbackPushed ) {
5088 callbackPulled = false;
5090 RtApi::tickStreamTime();
5097 CoTaskMemFree( captureFormat );
5098 CoTaskMemFree( renderFormat );
5102 // update stream state
5103 stream_.state = STREAM_STOPPED;
5105 if ( errorText_.empty() )
5111 //******************** End of __WINDOWS_WASAPI__ *********************//
5115 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5117 // Modified by Robin Davies, October 2005
5118 // - Improvements to DirectX pointer chasing.
5119 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5120 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5121 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5122 // Changed device query structure for RtAudio 4.0.7, January 2010
5124 #include <windows.h>
5125 #include <process.h>
5126 #include <mmsystem.h>
5130 #include <algorithm>
5132 #if defined(__MINGW32__)
5133 // missing from latest mingw winapi
5134 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5135 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5136 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5137 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5140 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5142 #ifdef _MSC_VER // if Microsoft Visual C++
5143 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5146 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5148 if ( pointer > bufferSize ) pointer -= bufferSize;
5149 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5150 if ( pointer < earlierPointer ) pointer += bufferSize;
5151 return pointer >= earlierPointer && pointer < laterPointer;
5154 // A structure to hold various information related to the DirectSound
5155 // API implementation.
5157 unsigned int drainCounter; // Tracks callback counts when draining
5158 bool internalDrain; // Indicates if stop is initiated from callback or not.
5162 UINT bufferPointer[2];
5163 DWORD dsBufferSize[2];
5164 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5168 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5171 // Declarations for utility functions, callbacks, and structures
5172 // specific to the DirectSound implementation.
5173 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5174 LPCTSTR description,
5178 static const char* getErrorString( int code );
5180 static unsigned __stdcall callbackHandler( void *ptr );
5189 : found(false) { validId[0] = false; validId[1] = false; }
5192 struct DsProbeData {
5194 std::vector<struct DsDevice>* dsDevices;
5197 RtApiDs :: RtApiDs()
5199 // Dsound will run both-threaded. If CoInitialize fails, then just
5200 // accept whatever the mainline chose for a threading model.
5201 coInitialized_ = false;
5202 HRESULT hr = CoInitialize( NULL );
5203 if ( !FAILED( hr ) ) coInitialized_ = true;
5206 RtApiDs :: ~RtApiDs()
5208 if ( stream_.state != STREAM_CLOSED ) closeStream();
5209 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5212 // The DirectSound default output is always the first device.
5213 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5218 // The DirectSound default input is always the first input device,
5219 // which is the first capture device enumerated.
5220 unsigned int RtApiDs :: getDefaultInputDevice( void )
5225 unsigned int RtApiDs :: getDeviceCount( void )
5227 // Set query flag for previously found devices to false, so that we
5228 // can check for any devices that have disappeared.
5229 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5230 dsDevices[i].found = false;
5232 // Query DirectSound devices.
5233 struct DsProbeData probeInfo;
5234 probeInfo.isInput = false;
5235 probeInfo.dsDevices = &dsDevices;
5236 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5237 if ( FAILED( result ) ) {
5238 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5239 errorText_ = errorStream_.str();
5240 error( RtAudioError::WARNING );
5243 // Query DirectSoundCapture devices.
5244 probeInfo.isInput = true;
5245 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5246 if ( FAILED( result ) ) {
5247 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5248 errorText_ = errorStream_.str();
5249 error( RtAudioError::WARNING );
5252 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5253 for ( unsigned int i=0; i<dsDevices.size(); ) {
5254 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5258 return static_cast<unsigned int>(dsDevices.size());
5261 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5263 RtAudio::DeviceInfo info;
5264 info.probed = false;
5266 if ( dsDevices.size() == 0 ) {
5267 // Force a query of all devices
5269 if ( dsDevices.size() == 0 ) {
5270 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5271 error( RtAudioError::INVALID_USE );
5276 if ( device >= dsDevices.size() ) {
5277 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5278 error( RtAudioError::INVALID_USE );
5283 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5285 LPDIRECTSOUND output;
5287 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5288 if ( FAILED( result ) ) {
5289 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5290 errorText_ = errorStream_.str();
5291 error( RtAudioError::WARNING );
5295 outCaps.dwSize = sizeof( outCaps );
5296 result = output->GetCaps( &outCaps );
5297 if ( FAILED( result ) ) {
5299 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5300 errorText_ = errorStream_.str();
5301 error( RtAudioError::WARNING );
5305 // Get output channel information.
5306 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5308 // Get sample rate information.
5309 info.sampleRates.clear();
5310 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5311 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5312 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5313 info.sampleRates.push_back( SAMPLE_RATES[k] );
5315 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5316 info.preferredSampleRate = SAMPLE_RATES[k];
5320 // Get format information.
5321 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5322 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5326 if ( getDefaultOutputDevice() == device )
5327 info.isDefaultOutput = true;
5329 if ( dsDevices[ device ].validId[1] == false ) {
5330 info.name = dsDevices[ device ].name;
5337 LPDIRECTSOUNDCAPTURE input;
5338 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5339 if ( FAILED( result ) ) {
5340 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5341 errorText_ = errorStream_.str();
5342 error( RtAudioError::WARNING );
5347 inCaps.dwSize = sizeof( inCaps );
5348 result = input->GetCaps( &inCaps );
5349 if ( FAILED( result ) ) {
5351 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5352 errorText_ = errorStream_.str();
5353 error( RtAudioError::WARNING );
5357 // Get input channel information.
5358 info.inputChannels = inCaps.dwChannels;
5360 // Get sample rate and format information.
5361 std::vector<unsigned int> rates;
5362 if ( inCaps.dwChannels >= 2 ) {
5363 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5364 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5365 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5366 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5367 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5368 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5369 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5370 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5372 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5373 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5374 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5375 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5376 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5378 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5379 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5380 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5381 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5382 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5385 else if ( inCaps.dwChannels == 1 ) {
5386 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5387 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5388 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5389 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5390 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5391 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5392 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5393 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5395 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5396 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5397 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5398 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5399 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5401 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5402 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5403 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5404 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5405 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5408 else info.inputChannels = 0; // technically, this would be an error
5412 if ( info.inputChannels == 0 ) return info;
5414 // Copy the supported rates to the info structure but avoid duplication.
5416 for ( unsigned int i=0; i<rates.size(); i++ ) {
5418 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5419 if ( rates[i] == info.sampleRates[j] ) {
5424 if ( found == false ) info.sampleRates.push_back( rates[i] );
5426 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5428 // If device opens for both playback and capture, we determine the channels.
5429 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5430 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5432 if ( device == 0 ) info.isDefaultInput = true;
5434 // Copy name and return.
5435 info.name = dsDevices[ device ].name;
5440 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5441 unsigned int firstChannel, unsigned int sampleRate,
5442 RtAudioFormat format, unsigned int *bufferSize,
5443 RtAudio::StreamOptions *options )
5445 if ( channels + firstChannel > 2 ) {
5446 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5450 size_t nDevices = dsDevices.size();
5451 if ( nDevices == 0 ) {
5452 // This should not happen because a check is made before this function is called.
5453 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5457 if ( device >= nDevices ) {
5458 // This should not happen because a check is made before this function is called.
5459 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5463 if ( mode == OUTPUT ) {
5464 if ( dsDevices[ device ].validId[0] == false ) {
5465 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5466 errorText_ = errorStream_.str();
5470 else { // mode == INPUT
5471 if ( dsDevices[ device ].validId[1] == false ) {
5472 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5473 errorText_ = errorStream_.str();
5478 // According to a note in PortAudio, using GetDesktopWindow()
5479 // instead of GetForegroundWindow() is supposed to avoid problems
5480 // that occur when the application's window is not the foreground
5481 // window. Also, if the application window closes before the
5482 // DirectSound buffer, DirectSound can crash. In the past, I had
5483 // problems when using GetDesktopWindow() but it seems fine now
5484 // (January 2010). I'll leave it commented here.
5485 // HWND hWnd = GetForegroundWindow();
5486 HWND hWnd = GetDesktopWindow();
5488 // Check the numberOfBuffers parameter and limit the lowest value to
5489 // two. This is a judgement call and a value of two is probably too
5490 // low for capture, but it should work for playback.
5492 if ( options ) nBuffers = options->numberOfBuffers;
5493 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5494 if ( nBuffers < 2 ) nBuffers = 3;
5496 // Check the lower range of the user-specified buffer size and set
5497 // (arbitrarily) to a lower bound of 32.
5498 if ( *bufferSize < 32 ) *bufferSize = 32;
5500 // Create the wave format structure. The data format setting will
5501 // be determined later.
5502 WAVEFORMATEX waveFormat;
5503 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5504 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5505 waveFormat.nChannels = channels + firstChannel;
5506 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5508 // Determine the device buffer size. By default, we'll use the value
5509 // defined above (32K), but we will grow it to make allowances for
5510 // very large software buffer sizes.
5511 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5512 DWORD dsPointerLeadTime = 0;
5514 void *ohandle = 0, *bhandle = 0;
5516 if ( mode == OUTPUT ) {
5518 LPDIRECTSOUND output;
5519 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5520 if ( FAILED( result ) ) {
5521 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5522 errorText_ = errorStream_.str();
5527 outCaps.dwSize = sizeof( outCaps );
5528 result = output->GetCaps( &outCaps );
5529 if ( FAILED( result ) ) {
5531 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5532 errorText_ = errorStream_.str();
5536 // Check channel information.
5537 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5538 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5539 errorText_ = errorStream_.str();
5543 // Check format information. Use 16-bit format unless not
5544 // supported or user requests 8-bit.
5545 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5546 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5547 waveFormat.wBitsPerSample = 16;
5548 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5551 waveFormat.wBitsPerSample = 8;
5552 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5554 stream_.userFormat = format;
5556 // Update wave format structure and buffer information.
5557 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5558 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5559 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5561 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5562 while ( dsPointerLeadTime * 2U > dsBufferSize )
5565 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5566 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5567 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5568 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5569 if ( FAILED( result ) ) {
5571 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5572 errorText_ = errorStream_.str();
5576 // Even though we will write to the secondary buffer, we need to
5577 // access the primary buffer to set the correct output format
5578 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5579 // buffer description.
5580 DSBUFFERDESC bufferDescription;
5581 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5582 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5583 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5585 // Obtain the primary buffer
5586 LPDIRECTSOUNDBUFFER buffer;
5587 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5588 if ( FAILED( result ) ) {
5590 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5591 errorText_ = errorStream_.str();
5595 // Set the primary DS buffer sound format.
5596 result = buffer->SetFormat( &waveFormat );
5597 if ( FAILED( result ) ) {
5599 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5600 errorText_ = errorStream_.str();
5604 // Setup the secondary DS buffer description.
5605 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5606 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5607 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5608 DSBCAPS_GLOBALFOCUS |
5609 DSBCAPS_GETCURRENTPOSITION2 |
5610 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5611 bufferDescription.dwBufferBytes = dsBufferSize;
5612 bufferDescription.lpwfxFormat = &waveFormat;
5614 // Try to create the secondary DS buffer. If that doesn't work,
5615 // try to use software mixing. Otherwise, there's a problem.
5616 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5617 if ( FAILED( result ) ) {
5618 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5619 DSBCAPS_GLOBALFOCUS |
5620 DSBCAPS_GETCURRENTPOSITION2 |
5621 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5622 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5623 if ( FAILED( result ) ) {
5625 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5626 errorText_ = errorStream_.str();
5631 // Get the buffer size ... might be different from what we specified.
5633 dsbcaps.dwSize = sizeof( DSBCAPS );
5634 result = buffer->GetCaps( &dsbcaps );
5635 if ( FAILED( result ) ) {
5638 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5639 errorText_ = errorStream_.str();
5643 dsBufferSize = dsbcaps.dwBufferBytes;
5645 // Lock the DS buffer
5648 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5649 if ( FAILED( result ) ) {
5652 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5653 errorText_ = errorStream_.str();
5657 // Zero the DS buffer
5658 ZeroMemory( audioPtr, dataLen );
5660 // Unlock the DS buffer
5661 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5662 if ( FAILED( result ) ) {
5665 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5666 errorText_ = errorStream_.str();
5670 ohandle = (void *) output;
5671 bhandle = (void *) buffer;
5674 if ( mode == INPUT ) {
5676 LPDIRECTSOUNDCAPTURE input;
5677 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5678 if ( FAILED( result ) ) {
5679 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5680 errorText_ = errorStream_.str();
5685 inCaps.dwSize = sizeof( inCaps );
5686 result = input->GetCaps( &inCaps );
5687 if ( FAILED( result ) ) {
5689 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5690 errorText_ = errorStream_.str();
5694 // Check channel information.
5695 if ( inCaps.dwChannels < channels + firstChannel ) {
5696 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5700 // Check format information. Use 16-bit format unless user
5702 DWORD deviceFormats;
5703 if ( channels + firstChannel == 2 ) {
5704 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5705 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5706 waveFormat.wBitsPerSample = 8;
5707 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5709 else { // assume 16-bit is supported
5710 waveFormat.wBitsPerSample = 16;
5711 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5714 else { // channel == 1
5715 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5716 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5717 waveFormat.wBitsPerSample = 8;
5718 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5720 else { // assume 16-bit is supported
5721 waveFormat.wBitsPerSample = 16;
5722 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5725 stream_.userFormat = format;
5727 // Update wave format structure and buffer information.
5728 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5729 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5730 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5732 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5733 while ( dsPointerLeadTime * 2U > dsBufferSize )
5736 // Setup the secondary DS buffer description.
5737 DSCBUFFERDESC bufferDescription;
5738 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5739 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5740 bufferDescription.dwFlags = 0;
5741 bufferDescription.dwReserved = 0;
5742 bufferDescription.dwBufferBytes = dsBufferSize;
5743 bufferDescription.lpwfxFormat = &waveFormat;
5745 // Create the capture buffer.
5746 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5747 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5748 if ( FAILED( result ) ) {
5750 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5751 errorText_ = errorStream_.str();
5755 // Get the buffer size ... might be different from what we specified.
5757 dscbcaps.dwSize = sizeof( DSCBCAPS );
5758 result = buffer->GetCaps( &dscbcaps );
5759 if ( FAILED( result ) ) {
5762 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5763 errorText_ = errorStream_.str();
5767 dsBufferSize = dscbcaps.dwBufferBytes;
5769 // NOTE: We could have a problem here if this is a duplex stream
5770 // and the play and capture hardware buffer sizes are different
5771 // (I'm actually not sure if that is a problem or not).
5772 // Currently, we are not verifying that.
5774 // Lock the capture buffer
5777 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5778 if ( FAILED( result ) ) {
5781 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5782 errorText_ = errorStream_.str();
5787 ZeroMemory( audioPtr, dataLen );
5789 // Unlock the buffer
5790 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5791 if ( FAILED( result ) ) {
5794 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5795 errorText_ = errorStream_.str();
5799 ohandle = (void *) input;
5800 bhandle = (void *) buffer;
5803 // Set various stream parameters
5804 DsHandle *handle = 0;
5805 stream_.nDeviceChannels[mode] = channels + firstChannel;
5806 stream_.nUserChannels[mode] = channels;
5807 stream_.bufferSize = *bufferSize;
5808 stream_.channelOffset[mode] = firstChannel;
5809 stream_.deviceInterleaved[mode] = true;
5810 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5811 else stream_.userInterleaved = true;
5813 // Set flag for buffer conversion
5814 stream_.doConvertBuffer[mode] = false;
5815 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5816 stream_.doConvertBuffer[mode] = true;
5817 if (stream_.userFormat != stream_.deviceFormat[mode])
5818 stream_.doConvertBuffer[mode] = true;
5819 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5820 stream_.nUserChannels[mode] > 1 )
5821 stream_.doConvertBuffer[mode] = true;
5823 // Allocate necessary internal buffers
5824 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5825 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5826 if ( stream_.userBuffer[mode] == NULL ) {
5827 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5831 if ( stream_.doConvertBuffer[mode] ) {
5833 bool makeBuffer = true;
5834 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5835 if ( mode == INPUT ) {
5836 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5837 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5838 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5843 bufferBytes *= *bufferSize;
5844 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5845 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5846 if ( stream_.deviceBuffer == NULL ) {
5847 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5853 // Allocate our DsHandle structures for the stream.
5854 if ( stream_.apiHandle == 0 ) {
5856 handle = new DsHandle;
5858 catch ( std::bad_alloc& ) {
5859 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5863 // Create a manual-reset event.
5864 handle->condition = CreateEvent( NULL, // no security
5865 TRUE, // manual-reset
5866 FALSE, // non-signaled initially
5868 stream_.apiHandle = (void *) handle;
5871 handle = (DsHandle *) stream_.apiHandle;
5872 handle->id[mode] = ohandle;
5873 handle->buffer[mode] = bhandle;
5874 handle->dsBufferSize[mode] = dsBufferSize;
5875 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5877 stream_.device[mode] = device;
5878 stream_.state = STREAM_STOPPED;
5879 if ( stream_.mode == OUTPUT && mode == INPUT )
5880 // We had already set up an output stream.
5881 stream_.mode = DUPLEX;
5883 stream_.mode = mode;
5884 stream_.nBuffers = nBuffers;
5885 stream_.sampleRate = sampleRate;
5887 // Setup the buffer conversion information structure.
5888 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5890 // Setup the callback thread.
5891 if ( stream_.callbackInfo.isRunning == false ) {
5893 stream_.callbackInfo.isRunning = true;
5894 stream_.callbackInfo.object = (void *) this;
5895 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5896 &stream_.callbackInfo, 0, &threadId );
5897 if ( stream_.callbackInfo.thread == 0 ) {
5898 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5902 // Boost DS thread priority
5903 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5909 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5910 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5911 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5912 if ( buffer ) buffer->Release();
5915 if ( handle->buffer[1] ) {
5916 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5917 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5918 if ( buffer ) buffer->Release();
5921 CloseHandle( handle->condition );
5923 stream_.apiHandle = 0;
5926 for ( int i=0; i<2; i++ ) {
5927 if ( stream_.userBuffer[i] ) {
5928 free( stream_.userBuffer[i] );
5929 stream_.userBuffer[i] = 0;
5933 if ( stream_.deviceBuffer ) {
5934 free( stream_.deviceBuffer );
5935 stream_.deviceBuffer = 0;
5938 stream_.state = STREAM_CLOSED;
// Close an open DirectSound stream: shut down the callback thread, release
// the DirectSound playback/capture buffers and device objects, destroy the
// signaling event, and free the internal user/device conversion buffers.
// NOTE(review): this is a numbered listing with interior lines elided (e.g.
// the early return after the WARNING and the buffer Stop()/Release() calls
// are not shown); comments below describe only the visible lines.
5942 void RtApiDs :: closeStream()
5944 if ( stream_.state == STREAM_CLOSED ) {
5945 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
5946 error( RtAudioError::WARNING );
// Ask the callback loop to exit, then join and dispose of the thread handle.
5950 // Stop the callback thread.
5951 stream_.callbackInfo.isRunning = false;
5952 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
5953 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
5955 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Index 0 = playback side, index 1 = capture side of the DsHandle arrays.
5957 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5958 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5959 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5966 if ( handle->buffer[1] ) {
5967 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5968 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the manual-reset event used to signal stream drain completion.
5975 CloseHandle( handle->condition );
5977 stream_.apiHandle = 0;
// Free the per-mode user buffers allocated in probeDeviceOpen().
5980 for ( int i=0; i<2; i++ ) {
5981 if ( stream_.userBuffer[i] ) {
5982 free( stream_.userBuffer[i] );
5983 stream_.userBuffer[i] = 0;
// Free the shared device (conversion) buffer, if one was allocated.
5987 if ( stream_.deviceBuffer ) {
5988 free( stream_.deviceBuffer );
5989 stream_.deviceBuffer = 0;
// Reset stream bookkeeping so a new stream can be opened.
5992 stream_.mode = UNINITIALIZED;
5993 stream_.state = STREAM_CLOSED;
// Start a stopped stream: begin looping playback and/or capture on the
// DirectSound buffers and mark the stream RUNNING. The callback thread is
// already alive (created at open time); it wakes up once state changes.
// NOTE(review): numbered listing with interior lines elided (early returns,
// HRESULT declaration, goto/unwind labels are not shown).
5996 void RtApiDs :: startStream()
5999 if ( stream_.state == STREAM_RUNNING ) {
6000 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6001 error( RtAudioError::WARNING );
6005 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6007 // Increase scheduler frequency on lesser windows (a side-effect of
6008 // increasing timer accuracy). On greater windows (Win2K or later),
6009 // this is already in effect.
6010 timeBeginPeriod( 1 );
// Reset the duplex-synchronization state used by callbackEvent().
6012 buffersRolling = false;
6013 duplexPrerollBytes = 0;
6015 if ( stream_.mode == DUPLEX ) {
6016 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6017 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off looping playback on the secondary output buffer.
6021 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6023 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6024 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6025 if ( FAILED( result ) ) {
6026 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6027 errorText_ = errorStream_.str();
// Kick off looping capture on the capture buffer.
6032 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6034 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6035 result = buffer->Start( DSCBSTART_LOOPING );
6036 if ( FAILED( result ) ) {
6037 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6038 errorText_ = errorStream_.str();
// Clear drain state and the drain-complete event before going RUNNING.
6043 handle->drainCounter = 0;
6044 handle->internalDrain = false;
6045 ResetEvent( handle->condition );
6046 stream_.state = STREAM_RUNNING;
6049 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream. For output/duplex, first let the stream drain
// (drainCounter handshake with callbackEvent, signaled via handle->condition),
// then stop each DirectSound buffer and zero it so a restart does not replay
// stale audio. Buffer pointers are reset to 0 for the next start.
// NOTE(review): numbered listing with interior lines elided (HRESULT/audioPtr/
// dataLen declarations, early returns and some closing braces are not shown).
6052 void RtApiDs :: stopStream()
6055 if ( stream_.state == STREAM_STOPPED ) {
6056 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6057 error( RtAudioError::WARNING );
6064 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6065 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Request a drain and wait for callbackEvent() to signal it finished.
6066 if ( handle->drainCounter == 0 ) {
6067 handle->drainCounter = 2;
6068 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6071 stream_.state = STREAM_STOPPED;
6073 MUTEX_LOCK( &stream_.mutex );
6075 // Stop the buffer and clear memory
6076 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6077 result = buffer->Stop();
6078 if ( FAILED( result ) ) {
6079 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6080 errorText_ = errorStream_.str();
6084 // Lock the buffer and clear it so that if we start to play again,
6085 // we won't have old data playing.
6086 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6087 if ( FAILED( result ) ) {
6088 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6089 errorText_ = errorStream_.str();
6093 // Zero the DS buffer
6094 ZeroMemory( audioPtr, dataLen );
6096 // Unlock the DS buffer
6097 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6098 if ( FAILED( result ) ) {
6099 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6100 errorText_ = errorStream_.str();
6104 // If we start playing again, we must begin at beginning of buffer.
6105 handle->bufferPointer[0] = 0;
6108 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6109 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6113 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above.
6115 if ( stream_.mode != DUPLEX )
6116 MUTEX_LOCK( &stream_.mutex );
6118 result = buffer->Stop();
6119 if ( FAILED( result ) ) {
6120 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6121 errorText_ = errorStream_.str();
6125 // Lock the buffer and clear it so that if we start to play again,
6126 // we won't have old data playing.
6127 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6128 if ( FAILED( result ) ) {
6129 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6130 errorText_ = errorStream_.str();
6134 // Zero the DS buffer
6135 ZeroMemory( audioPtr, dataLen );
6137 // Unlock the DS buffer
6138 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6139 if ( FAILED( result ) ) {
6140 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6141 errorText_ = errorStream_.str();
6145 // If we start recording again, we must begin at beginning of buffer.
6146 handle->bufferPointer[1] = 0;
6150 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6151 MUTEX_UNLOCK( &stream_.mutex );
6153 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream. Setting drainCounter to 2 tells callbackEvent()
// to stop immediately (skip the normal drain of queued output); the elided
// remainder of this function presumably falls through to stopStream() —
// TODO(review): confirm against the full source; lines are elided here.
6156 void RtApiDs :: abortStream()
6159 if ( stream_.state == STREAM_STOPPED ) {
6160 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6161 error( RtAudioError::WARNING );
6165 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6166 handle->drainCounter = 2;
// Per-iteration body of the DirectSound callback thread. One call does:
//   1. idle/drain bookkeeping (sleep when stopped, signal drain completion);
//   2. invoke the user callback to produce/consume one period of audio;
//   3. on first pass, wait for the DS play/capture cursors to start rolling
//      (DUPLEX) and seed the ring-buffer write/read pointers;
//   4. OUTPUT/DUPLEX: wait for safe space, then Lock/copy/Unlock the user
//      (or converted) data into the playback ring buffer;
//   5. INPUT/DUPLEX: wait for (or back off to) readable data, then
//      Lock/copy/Unlock it out of the capture ring buffer;
//   6. advance the stream time.
// FIX(review): four GetCurrentPosition() calls below had been corrupted by
// an HTML-entity mangling of "&curren" into "¤" (U+00A4), destroying the
// "&currentWritePointer"/"&currentReadPointer" arguments; restored here.
// NOTE(review): this is a numbered listing with interior lines elided
// (returns, closing braces, while(true) headers, etc. are not shown).
6171 void RtApiDs :: callbackEvent()
6173 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6174 Sleep( 50 ); // sleep 50 milliseconds
6178 if ( stream_.state == STREAM_CLOSED ) {
6179 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6180 error( RtAudioError::WARNING );
6184 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6185 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6187 // Check if we were draining the stream and signal is finished.
6188 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6190 stream_.state = STREAM_STOPPING;
// Only wake a blocked stopStream() caller; internal drains have no waiter.
6191 if ( handle->internalDrain == false )
6192 SetEvent( handle->condition );
6198 // Invoke user callback to get fresh output data UNLESS we are
6200 if ( handle->drainCounter == 0 ) {
6201 RtAudioCallback callback = (RtAudioCallback) info->callback;
6202 double streamTime = getStreamTime();
6203 RtAudioStreamStatus status = 0;
// Report (and clear) any under/overflow flagged on a previous pass.
6204 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6205 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6206 handle->xrun[0] = false;
6208 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6209 status |= RTAUDIO_INPUT_OVERFLOW;
6210 handle->xrun[1] = false;
6212 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6213 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort now; 1 = drain remaining output then stop.
6214 if ( cbReturnValue == 2 ) {
6215 stream_.state = STREAM_STOPPING;
6216 handle->drainCounter = 2;
6220 else if ( cbReturnValue == 1 ) {
6221 handle->drainCounter = 1;
6222 handle->internalDrain = true;
6227 DWORD currentWritePointer, safeWritePointer;
6228 DWORD currentReadPointer, safeReadPointer;
6229 UINT nextWritePointer;
6231 LPVOID buffer1 = NULL;
6232 LPVOID buffer2 = NULL;
6233 DWORD bufferSize1 = 0;
6234 DWORD bufferSize2 = 0;
6239 MUTEX_LOCK( &stream_.mutex );
// The stream may have been stopped while we ran the user callback.
6240 if ( stream_.state == STREAM_STOPPED ) {
6241 MUTEX_UNLOCK( &stream_.mutex );
6245 if ( buffersRolling == false ) {
6246 if ( stream_.mode == DUPLEX ) {
6247 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6249 // It takes a while for the devices to get rolling. As a result,
6250 // there's no guarantee that the capture and write device pointers
6251 // will move in lockstep. Wait here for both devices to start
6252 // rolling, and then set our buffer pointers accordingly.
6253 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6254 // bytes later than the write buffer.
6256 // Stub: a serious risk of having a pre-emptive scheduling round
6257 // take place between the two GetCurrentPosition calls... but I'm
6258 // really not sure how to solve the problem. Temporarily boost to
6259 // Realtime priority, maybe; but I'm not sure what priority the
6260 // DirectSound service threads run at. We *should* be roughly
6261 // within a ms or so of correct.
6263 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6264 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6266 DWORD startSafeWritePointer, startSafeReadPointer;
// Snapshot the initial cursor positions ...
6268 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6269 if ( FAILED( result ) ) {
6270 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6271 errorText_ = errorStream_.str();
6272 MUTEX_UNLOCK( &stream_.mutex );
6273 error( RtAudioError::SYSTEM_ERROR );
6276 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6277 if ( FAILED( result ) ) {
6278 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6279 errorText_ = errorStream_.str();
6280 MUTEX_UNLOCK( &stream_.mutex );
6281 error( RtAudioError::SYSTEM_ERROR );
// ... then poll (elided loop header) until BOTH cursors have moved.
6285 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6286 if ( FAILED( result ) ) {
6287 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6288 errorText_ = errorStream_.str();
6289 MUTEX_UNLOCK( &stream_.mutex );
6290 error( RtAudioError::SYSTEM_ERROR );
6293 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6294 if ( FAILED( result ) ) {
6295 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6296 errorText_ = errorStream_.str();
6297 MUTEX_UNLOCK( &stream_.mutex );
6298 error( RtAudioError::SYSTEM_ERROR );
6301 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6305 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
// Seed write pointer a lead-time ahead of the safe-write cursor (wrapped).
6307 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6308 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6309 handle->bufferPointer[1] = safeReadPointer;
6311 else if ( stream_.mode == OUTPUT ) {
6313 // Set the proper nextWritePosition after initial startup.
6314 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6315 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6316 if ( FAILED( result ) ) {
6317 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6318 errorText_ = errorStream_.str();
6319 MUTEX_UNLOCK( &stream_.mutex );
6320 error( RtAudioError::SYSTEM_ERROR );
6323 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6324 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6327 buffersRolling = true;
6330 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6332 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6334 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6335 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6336 bufferBytes *= formatBytes( stream_.userFormat );
6337 memset( stream_.userBuffer[0], 0, bufferBytes );
6340 // Setup parameters and do buffer conversion if necessary.
6341 if ( stream_.doConvertBuffer[0] ) {
6342 buffer = stream_.deviceBuffer;
6343 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6344 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6345 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6348 buffer = stream_.userBuffer[0];
6349 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6350 bufferBytes *= formatBytes( stream_.userFormat );
6353 // No byte swapping necessary in DirectSound implementation.
6355 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6356 // unsigned. So, we need to convert our signed 8-bit data here to
6358 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6359 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6361 DWORD dsBufferSize = handle->dsBufferSize[0];
6362 nextWritePointer = handle->bufferPointer[0];
6364 DWORD endWrite, leadPointer;
6366 // Find out where the read and "safe write" pointers are.
6367 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6368 if ( FAILED( result ) ) {
6369 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6370 errorText_ = errorStream_.str();
6371 MUTEX_UNLOCK( &stream_.mutex );
6372 error( RtAudioError::SYSTEM_ERROR );
6376 // We will copy our output buffer into the region between
6377 // safeWritePointer and leadPointer. If leadPointer is not
6378 // beyond the next endWrite position, wait until it is.
6379 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6380 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6381 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6382 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6383 endWrite = nextWritePointer + bufferBytes;
6385 // Check whether the entire write region is behind the play pointer.
6386 if ( leadPointer >= endWrite ) break;
6388 // If we are here, then we must wait until the leadPointer advances
6389 // beyond the end of our next write region. We use the
6390 // Sleep() function to suspend operation until that happens.
6391 double millis = ( endWrite - leadPointer ) * 1000.0;
6392 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6393 if ( millis < 1.0 ) millis = 1.0;
6394 Sleep( (DWORD) millis );
// Forbidden zone = [safeWritePointer, currentWritePointer): writing there
// would scribble over data the hardware is currently playing.
6397 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6398 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6399 // We've strayed into the forbidden zone ... resync the read pointer.
6400 handle->xrun[0] = true;
6401 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6402 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6403 handle->bufferPointer[0] = nextWritePointer;
6404 endWrite = nextWritePointer + bufferBytes;
6407 // Lock free space in the buffer
6408 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6409 &bufferSize1, &buffer2, &bufferSize2, 0 );
6410 if ( FAILED( result ) ) {
6411 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6412 errorText_ = errorStream_.str();
6413 MUTEX_UNLOCK( &stream_.mutex );
6414 error( RtAudioError::SYSTEM_ERROR );
6418 // Copy our buffer into the DS buffer
6419 CopyMemory( buffer1, buffer, bufferSize1 );
6420 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6422 // Update our buffer offset and unlock sound buffer
6423 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6424 if ( FAILED( result ) ) {
6425 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6426 errorText_ = errorStream_.str();
6427 MUTEX_UNLOCK( &stream_.mutex );
6428 error( RtAudioError::SYSTEM_ERROR );
6431 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6432 handle->bufferPointer[0] = nextWritePointer;
6435 // Don't bother draining input
6436 if ( handle->drainCounter ) {
6437 handle->drainCounter++;
6441 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6443 // Setup parameters.
6444 if ( stream_.doConvertBuffer[1] ) {
6445 buffer = stream_.deviceBuffer;
6446 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6447 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6450 buffer = stream_.userBuffer[1];
6451 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6452 bufferBytes *= formatBytes( stream_.userFormat );
6455 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6456 long nextReadPointer = handle->bufferPointer[1];
6457 DWORD dsBufferSize = handle->dsBufferSize[1];
6459 // Find out where the write and "safe read" pointers are.
6460 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6461 if ( FAILED( result ) ) {
6462 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6463 errorText_ = errorStream_.str();
6464 MUTEX_UNLOCK( &stream_.mutex );
6465 error( RtAudioError::SYSTEM_ERROR );
6469 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6470 DWORD endRead = nextReadPointer + bufferBytes;
6472 // Handling depends on whether we are INPUT or DUPLEX.
6473 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6474 // then a wait here will drag the write pointers into the forbidden zone.
6476 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6477 // it's in a safe position. This causes dropouts, but it seems to be the only
6478 // practical way to sync up the read and write pointers reliably, given the
6479 // very complex relationship between phase and increment of the read and write
6482 // In order to minimize audible dropouts in DUPLEX mode, we will
6483 // provide a pre-roll period of 0.5 seconds in which we return
6484 // zeros from the read buffer while the pointers sync up.
6486 if ( stream_.mode == DUPLEX ) {
6487 if ( safeReadPointer < endRead ) {
6488 if ( duplexPrerollBytes <= 0 ) {
6489 // Pre-roll time over. Be more aggressive.
6490 int adjustment = endRead-safeReadPointer;
6492 handle->xrun[1] = true;
6494 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6495 // and perform fine adjustments later.
6496 // - small adjustments: back off by twice as much.
6497 if ( adjustment >= 2*bufferBytes )
6498 nextReadPointer = safeReadPointer-2*bufferBytes;
6500 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6502 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6506 // In pre-roll time. Just do it.
6507 nextReadPointer = safeReadPointer - bufferBytes;
6508 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6510 endRead = nextReadPointer + bufferBytes;
6513 else { // mode == INPUT
6514 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6515 // See comments for playback.
6516 double millis = (endRead - safeReadPointer) * 1000.0;
6517 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6518 if ( millis < 1.0 ) millis = 1.0;
6519 Sleep( (DWORD) millis );
6521 // Wake up and find out where we are now.
6522 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6523 if ( FAILED( result ) ) {
6524 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6525 errorText_ = errorStream_.str();
6526 MUTEX_UNLOCK( &stream_.mutex );
6527 error( RtAudioError::SYSTEM_ERROR );
6531 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6535 // Lock free space in the buffer
6536 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6537 &bufferSize1, &buffer2, &bufferSize2, 0 );
6538 if ( FAILED( result ) ) {
6539 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6540 errorText_ = errorStream_.str();
6541 MUTEX_UNLOCK( &stream_.mutex );
6542 error( RtAudioError::SYSTEM_ERROR );
// During the duplex pre-roll, hand the user zeros instead of captured data.
6546 if ( duplexPrerollBytes <= 0 ) {
6547 // Copy our buffer into the DS buffer
6548 CopyMemory( buffer, buffer1, bufferSize1 );
6549 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6552 memset( buffer, 0, bufferSize1 );
6553 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6554 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6557 // Update our buffer offset and unlock sound buffer
6558 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6559 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6560 if ( FAILED( result ) ) {
6561 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6562 errorText_ = errorStream_.str();
6563 MUTEX_UNLOCK( &stream_.mutex );
6564 error( RtAudioError::SYSTEM_ERROR );
6567 handle->bufferPointer[1] = nextReadPointer;
6569 // No byte swapping necessary in DirectSound implementation.
6571 // If necessary, convert 8-bit data from unsigned to signed.
6572 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6573 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6575 // Do buffer conversion if necessary.
6576 if ( stream_.doConvertBuffer[1] )
6577 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6581 MUTEX_UNLOCK( &stream_.mutex );
6582 RtApi::tickStreamTime();
6585 // Definitions for utility functions and callbacks
6586 // specific to the DirectSound implementation.
// Entry point for the DS callback thread (created via _beginthreadex in
// probeDeviceOpen). Repeatedly drives the stream engine until isRunning is
// cleared by closeStream(); the loop exit and return are elided in this
// listing.
6588 static unsigned __stdcall callbackHandler( void *ptr )
6590 CallbackInfo *info = (CallbackInfo *) ptr;
6591 RtApiDs *object = (RtApiDs *) info->object;
// Pointer (not copy) so the thread observes updates made by other threads.
6592 bool* isRunning = &info->isRunning;
6594 while ( *isRunning == true ) {
6595 object->callbackEvent();
// DirectSoundEnumerate/DirectSoundCaptureEnumerate callback. Validates each
// enumerated device (by opening it and checking its caps), then records its
// name and GUID into the shared dsDevices list passed via lpContext. Returns
// TRUE so enumeration continues (visible on the early-out paths; the final
// return is elided in this listing).
6602 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6603 LPCTSTR description,
6607 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6608 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6611 bool validDevice = false;
// Capture path: the device is valid if it reports channels and formats.
6612 if ( probeInfo.isInput == true ) {
6614 LPDIRECTSOUNDCAPTURE object;
6616 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6617 if ( hr != DS_OK ) return TRUE;
6619 caps.dwSize = sizeof(caps);
6620 hr = object->GetCaps( &caps );
6621 if ( hr == DS_OK ) {
6622 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback path: valid if it supports primary mono or stereo buffers.
6629 LPDIRECTSOUND object;
6630 hr = DirectSoundCreate( lpguid, &object, NULL );
6631 if ( hr != DS_OK ) return TRUE;
6633 caps.dwSize = sizeof(caps);
6634 hr = object->GetCaps( &caps );
6635 if ( hr == DS_OK ) {
6636 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6642 // If good device, then save its name and guid.
6643 std::string name = convertCharPointerToStdString( description );
6644 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
6645 if ( lpguid == NULL )
6646 name = "Default Device";
6647 if ( validDevice ) {
// Merge with an existing entry of the same name (a device may appear in
// both the playback and capture enumerations); id[0]/validId[0] = output,
// id[1]/validId[1] = input.
6648 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6649 if ( dsDevices[i].name == name ) {
6650 dsDevices[i].found = true;
6651 if ( probeInfo.isInput ) {
6652 dsDevices[i].id[1] = lpguid;
6653 dsDevices[i].validId[1] = true;
6656 dsDevices[i].id[0] = lpguid;
6657 dsDevices[i].validId[0] = true;
// No existing entry matched: append a new device record.
6665 device.found = true;
6666 if ( probeInfo.isInput ) {
6667 device.id[1] = lpguid;
6668 device.validId[1] = true;
6671 device.id[0] = lpguid;
6672 device.validId[0] = true;
6674 dsDevices.push_back( device );
// Map a DirectSound HRESULT error code to a short human-readable string
// (used when composing errorStream_ messages). Falls through to a generic
// "unknown error" string for unrecognized codes; the switch header and some
// case bodies are elided in this listing.
6680 static const char* getErrorString( int code )
6684 case DSERR_ALLOCATED:
6685 return "Already allocated";
6687 case DSERR_CONTROLUNAVAIL:
6688 return "Control unavailable";
6690 case DSERR_INVALIDPARAM:
6691 return "Invalid parameter";
6693 case DSERR_INVALIDCALL:
6694 return "Invalid call";
6697 return "Generic error";
6699 case DSERR_PRIOLEVELNEEDED:
6700 return "Priority level needed";
6702 case DSERR_OUTOFMEMORY:
6703 return "Out of memory";
6705 case DSERR_BADFORMAT:
6706 return "The sample rate or the channel format is not supported";
6708 case DSERR_UNSUPPORTED:
6709 return "Not supported";
6711 case DSERR_NODRIVER:
6714 case DSERR_ALREADYINITIALIZED:
6715 return "Already initialized";
6717 case DSERR_NOAGGREGATION:
6718 return "No aggregation";
6720 case DSERR_BUFFERLOST:
6721 return "Buffer lost";
6723 case DSERR_OTHERAPPHASPRIO:
6724 return "Another application already has priority";
6726 case DSERR_UNINITIALIZED:
6727 return "Uninitialized";
6730 return "DirectSound unknown error";
6733 //******************** End of __WINDOWS_DS__ *********************//
6737 #if defined(__LINUX_ALSA__)
6739 #include <alsa/asoundlib.h>
// NOTE(review): the struct declaration line itself (presumably
// "struct AlsaHandle {") is elided from this listing — only a few members
// and the constructor initializer are visible; confirm against full source.
6742 // A structure to hold various information related to the ALSA API
// handles[0] = playback PCM, handles[1] = capture PCM — TODO confirm.
6745 snd_pcm_t *handles[2];
// Condition variable paired with a (elided) mutex to gate the callback loop.
6748 pthread_cond_t runnable_cv;
6752 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6755 static void *alsaCallbackHandler( void * ptr );
// Default constructor: all state is initialized by the RtApi base class.
6757 RtApiAlsa :: RtApiAlsa()
6759 // Nothing to do here.
// Destructor: make sure any open stream is torn down before the API object
// goes away.
6762 RtApiAlsa :: ~RtApiAlsa()
6764 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count available ALSA PCM devices by walking every sound card ("hw:N") and
// every PCM subdevice on it, then checking whether a "default" control can
// also be opened. Errors opening/iterating a card are reported as WARNINGs.
// NOTE(review): numbered listing with interior lines elided (the name[64]
// and snd_ctl_t declarations, the inner while loop header, the nDevices
// increments and the final return are not shown).
6767 unsigned int RtApiAlsa :: getDeviceCount( void )
6769 unsigned nDevices = 0;
6770 int result, subdevice, card;
6774 // Count cards and devices
6776 snd_card_next( &card );
6777 while ( card >= 0 ) {
6778 sprintf( name, "hw:%d", card );
6779 result = snd_ctl_open( &handle, name, 0 );
6781 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6782 errorText_ = errorStream_.str();
6783 error( RtAudioError::WARNING );
// Iterate the PCM devices on this card; subdevice becomes -1 when done.
6788 result = snd_ctl_pcm_next_device( handle, &subdevice );
6790 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6791 errorText_ = errorStream_.str();
6792 error( RtAudioError::WARNING );
6795 if ( subdevice < 0 )
6800 snd_ctl_close( handle );
6801 snd_card_next( &card );
// The "default" virtual device counts as one more if it can be opened.
6804 result = snd_ctl_open( &handle, "default", 0 );
6807 snd_ctl_close( handle );
// Fills an RtAudio::DeviceInfo for the device with the given 0-based index,
// using the same card/subdevice enumeration order as getDeviceCount().
// The probe sequence is: locate the device name -> test playback channels ->
// test capture channels -> (probeParameters) open in the higher-channel
// direction to collect supported sample rates, a preferred rate, and native
// data formats -> resolve a human-readable card name. Failures return the
// partially-filled info with info.probed presumably still false.
// NOTE(review): this listing has dropped many lines (numbering gaps such as
// 6820-6822, 6848-6852, 6907-6914, 7082-7093) including the `foundDevice`
// and `probeParameters` label sites and most `if (result < 0)` headers —
// verify any change against the canonical RtAudio source.
6813 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6815 RtAudio::DeviceInfo info;
6816 info.probed = false;
6818 unsigned nDevices = 0;
6819 int result, subdevice, card;
6823 // Count cards and devices
6826 snd_card_next( &card );
6827 while ( card >= 0 ) {
6828 sprintf( name, "hw:%d", card );
// Non-blocking control open so an in-use card doesn't hang the probe.
6829 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6831 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6832 errorText_ = errorStream_.str();
6833 error( RtAudioError::WARNING );
6838 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6840 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6841 errorText_ = errorStream_.str();
6842 error( RtAudioError::WARNING );
6845 if ( subdevice < 0 ) break;
// Found the requested index: remember its "hw:card,subdevice" name.
6846 if ( nDevices == device ) {
6847 sprintf( name, "hw:%d,%d", card, subdevice );
6853 snd_ctl_close( chandle );
6854 snd_card_next( &card );
// The "default" device occupies the last index, mirroring getDeviceCount().
6857 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6858 if ( result == 0 ) {
6859 if ( nDevices == device ) {
6860 strcpy( name, "default" );
6866 if ( nDevices == 0 ) {
6867 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6868 error( RtAudioError::INVALID_USE );
6872 if ( device >= nDevices ) {
6873 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6874 error( RtAudioError::INVALID_USE );
6880 // If a stream is already open, we cannot probe the stream devices.
6881 // Thus, use the saved results.
6882 if ( stream_.state != STREAM_CLOSED &&
6883 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6884 snd_ctl_close( chandle );
6885 if ( device >= devices_.size() ) {
6886 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6887 error( RtAudioError::WARNING );
// Return the snapshot taken by saveDeviceInfo() in probeDeviceOpen().
6890 return devices_[ device ];
6893 int openMode = SND_PCM_ASYNC;
6894 snd_pcm_stream_t stream;
6895 snd_pcm_info_t *pcminfo;
// alloca-style helpers: these structures live on the stack, no free needed.
6896 snd_pcm_info_alloca( &pcminfo );
6898 snd_pcm_hw_params_t *params;
6899 snd_pcm_hw_params_alloca( &params );
6901 // First try for playback unless default device (which has subdev -1)
6902 stream = SND_PCM_STREAM_PLAYBACK;
6903 snd_pcm_info_set_stream( pcminfo, stream );
6904 if ( subdevice != -1 ) {
6905 snd_pcm_info_set_device( pcminfo, subdevice );
6906 snd_pcm_info_set_subdevice( pcminfo, 0 );
6908 result = snd_ctl_pcm_info( chandle, pcminfo );
6910 // Device probably doesn't support playback.
// Open non-blocking so a busy device fails fast instead of hanging.
6915 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6917 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6918 errorText_ = errorStream_.str();
6919 error( RtAudioError::WARNING );
6923 // The device is open ... fill the parameter structure.
6924 result = snd_pcm_hw_params_any( phandle, params );
6926 snd_pcm_close( phandle );
6927 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6928 errorText_ = errorStream_.str();
6929 error( RtAudioError::WARNING );
6933 // Get output channel information.
6935 result = snd_pcm_hw_params_get_channels_max( params, &value );
6937 snd_pcm_close( phandle );
6938 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
6939 errorText_ = errorStream_.str();
6940 error( RtAudioError::WARNING );
6943 info.outputChannels = value;
6944 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
6947 stream = SND_PCM_STREAM_CAPTURE;
6948 snd_pcm_info_set_stream( pcminfo, stream );
6950 // Now try for capture unless default device (with subdev = -1)
6951 if ( subdevice != -1 ) {
6952 result = snd_ctl_pcm_info( chandle, pcminfo );
6953 snd_ctl_close( chandle );
6955 // Device probably doesn't support capture.
// With no output AND no capture support there is nothing left to probe.
6956 if ( info.outputChannels == 0 ) return info;
6957 goto probeParameters;
6961 snd_ctl_close( chandle );
6963 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
6965 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6966 errorText_ = errorStream_.str();
6967 error( RtAudioError::WARNING );
6968 if ( info.outputChannels == 0 ) return info;
6969 goto probeParameters;
6972 // The device is open ... fill the parameter structure.
6973 result = snd_pcm_hw_params_any( phandle, params );
6975 snd_pcm_close( phandle );
6976 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6977 errorText_ = errorStream_.str();
6978 error( RtAudioError::WARNING );
6979 if ( info.outputChannels == 0 ) return info;
6980 goto probeParameters;
6983 result = snd_pcm_hw_params_get_channels_max( params, &value );
6985 snd_pcm_close( phandle );
6986 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
6987 errorText_ = errorStream_.str();
6988 error( RtAudioError::WARNING );
6989 if ( info.outputChannels == 0 ) return info;
6990 goto probeParameters;
6992 info.inputChannels = value;
6993 snd_pcm_close( phandle );
6995 // If device opens for both playback and capture, we determine the channels.
6996 if ( info.outputChannels > 0 && info.inputChannels > 0 )
6997 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
6999 // ALSA doesn't provide default devices so we'll use the first available one.
7000 if ( device == 0 && info.outputChannels > 0 )
7001 info.isDefaultOutput = true;
7002 if ( device == 0 && info.inputChannels > 0 )
7003 info.isDefaultInput = true;
// probeParameters label target (label line itself dropped from this listing).
7006 // At this point, we just need to figure out the supported data
7007 // formats and sample rates. We'll proceed by opening the device in
7008 // the direction with the maximum number of channels, or playback if
7009 // they are equal. This might limit our sample rate options, but so
7012 if ( info.outputChannels >= info.inputChannels )
7013 stream = SND_PCM_STREAM_PLAYBACK;
7015 stream = SND_PCM_STREAM_CAPTURE;
7016 snd_pcm_info_set_stream( pcminfo, stream );
7018 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7020 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7021 errorText_ = errorStream_.str();
7022 error( RtAudioError::WARNING );
7026 // The device is open ... fill the parameter structure.
7027 result = snd_pcm_hw_params_any( phandle, params );
7029 snd_pcm_close( phandle );
7030 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7031 errorText_ = errorStream_.str();
7032 error( RtAudioError::WARNING );
7036 // Test our discrete set of sample rate values.
7037 info.sampleRates.clear();
7038 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7039 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7040 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
7042 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7043 info.preferredSampleRate = SAMPLE_RATES[i];
7046 if ( info.sampleRates.size() == 0 ) {
7047 snd_pcm_close( phandle );
7048 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7049 errorText_ = errorStream_.str();
7050 error( RtAudioError::WARNING );
7054 // Probe the supported data formats ... we don't care about endian-ness just yet
7055 snd_pcm_format_t format;
7056 info.nativeFormats = 0;
7057 format = SND_PCM_FORMAT_S8;
7058 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7059 info.nativeFormats |= RTAUDIO_SINT8;
7060 format = SND_PCM_FORMAT_S16;
7061 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7062 info.nativeFormats |= RTAUDIO_SINT16;
7063 format = SND_PCM_FORMAT_S24;
7064 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7065 info.nativeFormats |= RTAUDIO_SINT24;
7066 format = SND_PCM_FORMAT_S32;
7067 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7068 info.nativeFormats |= RTAUDIO_SINT32;
7069 format = SND_PCM_FORMAT_FLOAT;
7070 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7071 info.nativeFormats |= RTAUDIO_FLOAT32;
7072 format = SND_PCM_FORMAT_FLOAT64;
7073 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7074 info.nativeFormats |= RTAUDIO_FLOAT64;
7076 // Check that we have at least one supported format
7077 if ( info.nativeFormats == 0 ) {
7078 snd_pcm_close( phandle );
7079 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7080 errorText_ = errorStream_.str();
7081 error( RtAudioError::WARNING );
7085 // Get the device name
7087 result = snd_card_get_name( card, &cardname );
7088 if ( result >= 0 ) {
// Replace "hw:N,M" with the friendlier "hw:<cardname>,M" form.
7089 sprintf( name, "hw:%s,%d", cardname, subdevice );
7094 // That's all ... close the device and return
7095 snd_pcm_close( phandle );
// Snapshots every device's DeviceInfo into devices_ BEFORE a stream is opened,
// because getDeviceInfo() cannot probe a device that is already open (it
// returns these saved results instead — see the STREAM_CLOSED check there).
7100 void RtApiAlsa :: saveDeviceInfo( void )
7104 unsigned int nDevices = getDeviceCount();
7105 devices_.resize( nDevices );
7106 for ( unsigned int i=0; i<nDevices; i++ )
7107 devices_[i] = getDeviceInfo( i );
// Opens and configures one direction (OUTPUT or INPUT) of an ALSA stream:
// resolves the device name, opens the pcm handle, negotiates access mode /
// data format / sample rate / channels / period size & count, installs
// hw and sw params, allocates the AlsaHandle and conversion buffers, links
// duplex handles when possible, and finally spawns the callback thread.
// On any failure it falls through to cleanup (the shared error path near the
// end) and returns FAILURE; on success stream_ is left in STREAM_STOPPED.
// NOTE(review): this listing has dropped lines throughout (numbering gaps
// such as 7150-7153 `foundDevice`, 7598-7606 the error label, and most
// `if (result < 0)` headers) — verify any change against the canonical source.
7110 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7111 unsigned int firstChannel, unsigned int sampleRate,
7112 RtAudioFormat format, unsigned int *bufferSize,
7113 RtAudio::StreamOptions *options )
7116 #if defined(__RTAUDIO_DEBUG__)
7118 snd_output_stdio_attach(&out, stderr, 0);
7121 // I'm not using the "plug" interface ... too much inconsistent behavior.
7123 unsigned nDevices = 0;
7124 int result, subdevice, card;
// User may force the ALSA "default" device regardless of the index given.
7128 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7129 snprintf(name, sizeof(name), "%s", "default");
7131 // Count cards and devices
// Same enumeration order as getDeviceCount()/getDeviceInfo(), so `device`
// indexes consistently across all three functions.
7133 snd_card_next( &card );
7134 while ( card >= 0 ) {
7135 sprintf( name, "hw:%d", card );
7136 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7138 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7139 errorText_ = errorStream_.str();
7144 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7145 if ( result < 0 ) break;
7146 if ( subdevice < 0 ) break;
7147 if ( nDevices == device ) {
7148 sprintf( name, "hw:%d,%d", card, subdevice );
7149 snd_ctl_close( chandle );
7154 snd_ctl_close( chandle );
7155 snd_card_next( &card );
7158 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7159 if ( result == 0 ) {
7160 if ( nDevices == device ) {
7161 strcpy( name, "default" );
7167 if ( nDevices == 0 ) {
7168 // This should not happen because a check is made before this function is called.
7169 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7173 if ( device >= nDevices ) {
7174 // This should not happen because a check is made before this function is called.
7175 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7182 // The getDeviceInfo() function will not work for a device that is
7183 // already open. Thus, we'll probe the system before opening a
7184 // stream and save the results for use by getDeviceInfo().
7185 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7186 this->saveDeviceInfo();
7188 snd_pcm_stream_t stream;
7189 if ( mode == OUTPUT )
7190 stream = SND_PCM_STREAM_PLAYBACK;
7192 stream = SND_PCM_STREAM_CAPTURE;
// Blocking open here (no SND_PCM_NONBLOCK), unlike the probe-only paths.
7195 int openMode = SND_PCM_ASYNC;
7196 result = snd_pcm_open( &phandle, name, stream, openMode );
7198 if ( mode == OUTPUT )
7199 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7201 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7202 errorText_ = errorStream_.str();
7206 // Fill the parameter structure.
7207 snd_pcm_hw_params_t *hw_params;
7208 snd_pcm_hw_params_alloca( &hw_params );
7209 result = snd_pcm_hw_params_any( phandle, hw_params );
7211 snd_pcm_close( phandle );
7212 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7213 errorText_ = errorStream_.str();
7217 #if defined(__RTAUDIO_DEBUG__)
7218 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7219 snd_pcm_hw_params_dump( hw_params, out );
7222 // Set access ... check user preference.
// Try the user's preferred interleaving first; fall back to the other and
// record what the DEVICE actually uses in deviceInterleaved[mode] so the
// buffer-conversion flags below can reconcile the two.
7223 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7224 stream_.userInterleaved = false;
7225 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7227 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7228 stream_.deviceInterleaved[mode] = true;
7231 stream_.deviceInterleaved[mode] = false;
7234 stream_.userInterleaved = true;
7235 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7237 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7238 stream_.deviceInterleaved[mode] = false;
7241 stream_.deviceInterleaved[mode] = true;
7245 snd_pcm_close( phandle );
7246 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7247 errorText_ = errorStream_.str();
7251 // Determine how to set the device format.
7252 stream_.userFormat = format;
7253 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7255 if ( format == RTAUDIO_SINT8 )
7256 deviceFormat = SND_PCM_FORMAT_S8;
7257 else if ( format == RTAUDIO_SINT16 )
7258 deviceFormat = SND_PCM_FORMAT_S16;
7259 else if ( format == RTAUDIO_SINT24 )
7260 deviceFormat = SND_PCM_FORMAT_S24;
7261 else if ( format == RTAUDIO_SINT32 )
7262 deviceFormat = SND_PCM_FORMAT_S32;
7263 else if ( format == RTAUDIO_FLOAT32 )
7264 deviceFormat = SND_PCM_FORMAT_FLOAT;
7265 else if ( format == RTAUDIO_FLOAT64 )
7266 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7268 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7269 stream_.deviceFormat[mode] = format;
7273 // The user requested format is not natively supported by the device.
// Fall back through formats from widest to narrowest; RtAudio's conversion
// layer will translate between userFormat and deviceFormat[mode].
7274 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7275 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7276 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7280 deviceFormat = SND_PCM_FORMAT_FLOAT;
7281 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7282 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7286 deviceFormat = SND_PCM_FORMAT_S32;
7287 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7288 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7292 deviceFormat = SND_PCM_FORMAT_S24;
7293 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7294 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7298 deviceFormat = SND_PCM_FORMAT_S16;
7299 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7300 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7304 deviceFormat = SND_PCM_FORMAT_S8;
7305 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7306 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7310 // If we get here, no supported format was found.
7311 snd_pcm_close( phandle );
7312 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7313 errorText_ = errorStream_.str();
7317 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7319 snd_pcm_close( phandle );
7320 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7321 errorText_ = errorStream_.str();
7325 // Determine whether byte-swaping is necessary.
// S8 is single-byte, so endianness is irrelevant for it.
7326 stream_.doByteSwap[mode] = false;
7327 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7328 result = snd_pcm_format_cpu_endian( deviceFormat );
7330 stream_.doByteSwap[mode] = true;
7331 else if (result < 0) {
7332 snd_pcm_close( phandle );
7333 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7334 errorText_ = errorStream_.str();
7339 // Set the sample rate.
// _near variant may adjust sampleRate to the closest supported value;
// the possibly-adjusted value is stored into stream_.sampleRate below.
7340 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7342 snd_pcm_close( phandle );
7343 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7344 errorText_ = errorStream_.str();
7348 // Determine the number of channels for this device. We support a possible
7349 // minimum device channel number > than the value requested by the user.
7350 stream_.nUserChannels[mode] = channels;
7352 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7353 unsigned int deviceChannels = value;
7354 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7355 snd_pcm_close( phandle );
7356 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7357 errorText_ = errorStream_.str();
7361 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7363 snd_pcm_close( phandle );
7364 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7365 errorText_ = errorStream_.str();
// Open at least enough channels to cover channels + firstChannel offset.
7368 deviceChannels = value;
7369 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7370 stream_.nDeviceChannels[mode] = deviceChannels;
7372 // Set the device channels.
7373 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7375 snd_pcm_close( phandle );
7376 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7377 errorText_ = errorStream_.str();
7381 // Set the buffer (or period) size.
7383 snd_pcm_uframes_t periodSize = *bufferSize;
7384 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7386 snd_pcm_close( phandle );
7387 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7388 errorText_ = errorStream_.str();
// Report the (possibly adjusted) period size back to the caller.
7391 *bufferSize = periodSize;
7393 // Set the buffer number, which in ALSA is referred to as the "period".
7394 unsigned int periods = 0;
7395 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7396 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7397 if ( periods < 2 ) periods = 4; // a fairly safe default value
7398 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7400 snd_pcm_close( phandle );
7401 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7402 errorText_ = errorStream_.str();
7406 // If attempting to setup a duplex stream, the bufferSize parameter
7407 // MUST be the same in both directions!
7408 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7409 snd_pcm_close( phandle );
7410 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7411 errorText_ = errorStream_.str();
7415 stream_.bufferSize = *bufferSize;
7417 // Install the hardware configuration
7418 result = snd_pcm_hw_params( phandle, hw_params );
7420 snd_pcm_close( phandle );
7421 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7422 errorText_ = errorStream_.str();
7426 #if defined(__RTAUDIO_DEBUG__)
7427 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7428 snd_pcm_hw_params_dump( hw_params, out );
7431 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7432 snd_pcm_sw_params_t *sw_params = NULL;
7433 snd_pcm_sw_params_alloca( &sw_params );
7434 snd_pcm_sw_params_current( phandle, sw_params );
7435 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
// ULONG_MAX stop threshold: the device keeps running through xruns.
7436 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7437 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7439 // The following two settings were suggested by Theo Veenker
7440 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7441 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7443 // here are two options for a fix
7444 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
// Use the ring-buffer boundary as the silence size so underrun gaps are zeroed.
7445 snd_pcm_uframes_t val;
7446 snd_pcm_sw_params_get_boundary( sw_params, &val );
7447 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7449 result = snd_pcm_sw_params( phandle, sw_params );
7451 snd_pcm_close( phandle );
7452 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7453 errorText_ = errorStream_.str();
7457 #if defined(__RTAUDIO_DEBUG__)
7458 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7459 snd_pcm_sw_params_dump( sw_params, out );
7462 // Set flags for buffer conversion
// Conversion is needed whenever the user-side and device-side format,
// channel count, or interleaving disagree.
7463 stream_.doConvertBuffer[mode] = false;
7464 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7465 stream_.doConvertBuffer[mode] = true;
7466 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7467 stream_.doConvertBuffer[mode] = true;
7468 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7469 stream_.nUserChannels[mode] > 1 )
7470 stream_.doConvertBuffer[mode] = true;
7472 // Allocate the ApiHandle if necessary and then save.
// First direction opened allocates the AlsaHandle; the second (duplex)
// direction reuses it via stream_.apiHandle.
7473 AlsaHandle *apiInfo = 0;
7474 if ( stream_.apiHandle == 0 ) {
7476 apiInfo = (AlsaHandle *) new AlsaHandle;
7478 catch ( std::bad_alloc& ) {
7479 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7483 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7484 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7488 stream_.apiHandle = (void *) apiInfo;
7489 apiInfo->handles[0] = 0;
7490 apiInfo->handles[1] = 0;
7493 apiInfo = (AlsaHandle *) stream_.apiHandle;
7495 apiInfo->handles[mode] = phandle;
7498 // Allocate necessary internal buffers.
7499 unsigned long bufferBytes;
7500 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7501 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7502 if ( stream_.userBuffer[mode] == NULL ) {
7503 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7507 if ( stream_.doConvertBuffer[mode] ) {
7509 bool makeBuffer = true;
7510 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7511 if ( mode == INPUT ) {
// Reuse the output-side device buffer for duplex if it is big enough.
7512 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7513 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7514 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7519 bufferBytes *= *bufferSize;
7520 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7521 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7522 if ( stream_.deviceBuffer == NULL ) {
7523 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7529 stream_.sampleRate = sampleRate;
7530 stream_.nBuffers = periods;
7531 stream_.device[mode] = device;
7532 stream_.state = STREAM_STOPPED;
7534 // Setup the buffer conversion information structure.
7535 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7537 // Setup thread if necessary.
7538 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7539 // We had already set up an output stream.
7540 stream_.mode = DUPLEX;
7541 // Link the streams if possible.
// Linked handles start/stop together in hardware; if linking fails the
// two directions are driven independently (see startStream/stopStream).
7542 apiInfo->synchronized = false;
7543 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7544 apiInfo->synchronized = true;
7546 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7547 error( RtAudioError::WARNING );
7551 stream_.mode = mode;
7553 // Setup callback thread.
7554 stream_.callbackInfo.object = (void *) this;
7556 // Set the thread attributes for joinable and realtime scheduling
7557 // priority (optional). The higher priority will only take affect
7558 // if the program is run as root or suid. Note, under Linux
7559 // processes with CAP_SYS_NICE privilege, a user can change
7560 // scheduling policy and priority (thus need not be root). See
7561 // POSIX "capabilities".
7562 pthread_attr_t attr;
7563 pthread_attr_init( &attr );
7564 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7565 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7566 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7567 stream_.callbackInfo.doRealtime = true;
7568 struct sched_param param;
7569 int priority = options->priority;
// Clamp the requested priority into SCHED_RR's valid range.
7570 int min = sched_get_priority_min( SCHED_RR );
7571 int max = sched_get_priority_max( SCHED_RR );
7572 if ( priority < min ) priority = min;
7573 else if ( priority > max ) priority = max;
7574 param.sched_priority = priority;
7576 // Set the policy BEFORE the priority. Otherwise it fails.
7577 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7578 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7579 // This is definitely required. Otherwise it fails.
7580 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7581 pthread_attr_setschedparam(&attr, &param);
7584 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7586 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7589 stream_.callbackInfo.isRunning = true;
7590 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7591 pthread_attr_destroy( &attr );
7593 // Failed. Try instead with default attributes.
7594 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7596 stream_.callbackInfo.isRunning = false;
7597 errorText_ = "RtApiAlsa::error creating callback thread!";
// Shared error/cleanup path (label line dropped from this listing):
// releases everything allocated above so a failed open leaves no residue.
7607 pthread_cond_destroy( &apiInfo->runnable_cv );
7608 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7609 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7611 stream_.apiHandle = 0;
7614 if ( phandle) snd_pcm_close( phandle );
7616 for ( int i=0; i<2; i++ ) {
7617 if ( stream_.userBuffer[i] ) {
7618 free( stream_.userBuffer[i] );
7619 stream_.userBuffer[i] = 0;
7623 if ( stream_.deviceBuffer ) {
7624 free( stream_.deviceBuffer );
7625 stream_.deviceBuffer = 0;
7628 stream_.state = STREAM_CLOSED;
// Tears down an open stream: wakes and joins the callback thread, drops any
// in-flight pcm data, closes both pcm handles, destroys the AlsaHandle and
// all buffers, and resets stream_ to UNINITIALIZED/STREAM_CLOSED.
// Safe to call on an already-closed stream (warns and returns).
7632 void RtApiAlsa :: closeStream()
7634 if ( stream_.state == STREAM_CLOSED ) {
7635 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7636 error( RtAudioError::WARNING );
7640 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Clearing isRunning makes the callback thread exit its loop; if it is
// parked on runnable_cv (stopped state) it must be signalled first.
7641 stream_.callbackInfo.isRunning = false;
7642 MUTEX_LOCK( &stream_.mutex );
7643 if ( stream_.state == STREAM_STOPPED ) {
7644 apiInfo->runnable = true;
7645 pthread_cond_signal( &apiInfo->runnable_cv );
7647 MUTEX_UNLOCK( &stream_.mutex );
7648 pthread_join( stream_.callbackInfo.thread, NULL );
7650 if ( stream_.state == STREAM_RUNNING ) {
7651 stream_.state = STREAM_STOPPED;
// snd_pcm_drop discards pending frames immediately (no drain on close).
7652 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7653 snd_pcm_drop( apiInfo->handles[0] );
7654 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7655 snd_pcm_drop( apiInfo->handles[1] );
7659 pthread_cond_destroy( &apiInfo->runnable_cv );
7660 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7661 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7663 stream_.apiHandle = 0;
7666 for ( int i=0; i<2; i++ ) {
7667 if ( stream_.userBuffer[i] ) {
7668 free( stream_.userBuffer[i] );
7669 stream_.userBuffer[i] = 0;
7673 if ( stream_.deviceBuffer ) {
7674 free( stream_.deviceBuffer );
7675 stream_.deviceBuffer = 0;
7678 stream_.mode = UNINITIALIZED;
7679 stream_.state = STREAM_CLOSED;
// Starts a stopped stream: prepares each pcm handle that is not already in
// the PREPARED state, flips stream_.state to RUNNING, and signals the parked
// callback thread via runnable_cv. For unlinked duplex/input streams the
// capture handle is dropped first to flush stale data captured while idle.
7682 void RtApiAlsa :: startStream()
7684 // This method calls snd_pcm_prepare if the device isn't already in that state.
7687 if ( stream_.state == STREAM_RUNNING ) {
7688 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7689 error( RtAudioError::WARNING );
7693 MUTEX_LOCK( &stream_.mutex );
7696 snd_pcm_state_t state;
7697 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7698 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7699 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7700 state = snd_pcm_state( handle[0] );
7701 if ( state != SND_PCM_STATE_PREPARED ) {
7702 result = snd_pcm_prepare( handle[0] );
7704 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7705 errorText_ = errorStream_.str();
// When handles are linked (synchronized), preparing/starting the output
// side drives the input side too, so this branch is skipped.
7711 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7712 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7713 state = snd_pcm_state( handle[1] );
7714 if ( state != SND_PCM_STATE_PREPARED ) {
7715 result = snd_pcm_prepare( handle[1] );
7717 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7718 errorText_ = errorStream_.str();
7724 stream_.state = STREAM_RUNNING;
// Wake the callback thread waiting in callbackEvent().
7727 apiInfo->runnable = true;
7728 pthread_cond_signal( &apiInfo->runnable_cv );
7729 MUTEX_UNLOCK( &stream_.mutex );
7731 if ( result >= 0 ) return;
7732 error( RtAudioError::SYSTEM_ERROR );
// Stops a running stream gracefully: output is drained (all queued frames
// play out) unless the handles are linked, in which case it is dropped so
// both directions stop together. Contrast with abortStream(), which always
// drops. The state flag is cleared BEFORE taking the mutex so the callback
// thread sees the stop as soon as possible.
7735 void RtApiAlsa :: stopStream()
7738 if ( stream_.state == STREAM_STOPPED ) {
7739 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7740 error( RtAudioError::WARNING );
7744 stream_.state = STREAM_STOPPED;
7745 MUTEX_LOCK( &stream_.mutex );
7748 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7749 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7750 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7751 if ( apiInfo->synchronized )
7752 result = snd_pcm_drop( handle[0] );
7754 result = snd_pcm_drain( handle[0] );
7756 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7757 errorText_ = errorStream_.str();
// Capture has nothing to drain — pending input is simply discarded.
7762 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7763 result = snd_pcm_drop( handle[1] );
7765 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7766 errorText_ = errorStream_.str();
7772 apiInfo->runnable = false; // fixes high CPU usage when stopped
7773 MUTEX_UNLOCK( &stream_.mutex );
7775 if ( result >= 0 ) return;
7776 error( RtAudioError::SYSTEM_ERROR );
// Stops a running stream immediately: both directions are snd_pcm_drop()'d,
// discarding any queued audio. Identical structure to stopStream() except
// output is never drained.
7779 void RtApiAlsa :: abortStream()
7782 if ( stream_.state == STREAM_STOPPED ) {
7783 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7784 error( RtAudioError::WARNING );
7788 stream_.state = STREAM_STOPPED;
7789 MUTEX_LOCK( &stream_.mutex );
7792 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7793 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7794 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7795 result = snd_pcm_drop( handle[0] );
7797 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7798 errorText_ = errorStream_.str();
// Linked handles stop together, so the input side only needs an explicit
// drop when the streams could not be synchronized at open time.
7803 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7804 result = snd_pcm_drop( handle[1] );
7806 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7807 errorText_ = errorStream_.str();
7813 apiInfo->runnable = false; // fixes high CPU usage when stopped
7814 MUTEX_UNLOCK( &stream_.mutex );
7816 if ( result >= 0 ) return;
7817 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA audio cycle: wait while stopped, invoke the
// user callback, then read capture frames and/or write playback frames,
// handling xruns (EPIPE) by re-preparing the PCM.
// NOTE(review): partial dump — gaps in the embedded numbering hide early
// returns, `else` branches, `goto unlock`/`tryagain` labels and the
// declarations of `result`, `buffer`, `channels` and `handle`.
7820 void RtApiAlsa :: callbackEvent()
7822   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While the stream is stopped, park on the condition variable until
// startStream() sets apiInfo->runnable and signals runnable_cv.
7823   if ( stream_.state == STREAM_STOPPED ) {
7824     MUTEX_LOCK( &stream_.mutex );
7825     while ( !apiInfo->runnable )
7826       pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7828     if ( stream_.state != STREAM_RUNNING ) {
7829       MUTEX_UNLOCK( &stream_.mutex );
7832     MUTEX_UNLOCK( &stream_.mutex );
7835   if ( stream_.state == STREAM_CLOSED ) {
7836     errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7837     error( RtAudioError::WARNING );
// Report any xrun observed since the last cycle to the user callback via
// the status flags, then clear the latched flag.
7841   int doStopStream = 0;
7842   RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7843   double streamTime = getStreamTime();
7844   RtAudioStreamStatus status = 0;
7845   if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7846     status |= RTAUDIO_OUTPUT_UNDERFLOW;
7847     apiInfo->xrun[0] = false;
7849   if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7850     status |= RTAUDIO_INPUT_OVERFLOW;
7851     apiInfo->xrun[1] = false;
// Callback return value: 1 = stop after this cycle, 2 = abort immediately.
7853   doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7854                            stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
7856   if ( doStopStream == 2 ) {
7861   MUTEX_LOCK( &stream_.mutex );
7863   // The state might change while waiting on a mutex.
7864   if ( stream_.state == STREAM_STOPPED ) goto unlock;
7870   snd_pcm_sframes_t frames;
7871   RtAudioFormat format;
7872   handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side: read one buffer of input frames. ----
7874   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7876     // Setup parameters.
// When a conversion is needed, read into the device-format staging buffer;
// otherwise read straight into the user buffer.
7877     if ( stream_.doConvertBuffer[1] ) {
7878       buffer = stream_.deviceBuffer;
7879       channels = stream_.nDeviceChannels[1];
7880       format = stream_.deviceFormat[1];
7883       buffer = stream_.userBuffer[1];
7884       channels = stream_.nUserChannels[1];
7885       format = stream_.userFormat;
7888     // Read samples from device in interleaved/non-interleaved format.
7889     if ( stream_.deviceInterleaved[1] )
7890       result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the planar buffer.
7892       void *bufs[channels];
7893       size_t offset = stream_.bufferSize * formatBytes( format );
7894       for ( int i=0; i<channels; i++ )
7895         bufs[i] = (void *) (buffer + (i * offset));
7896       result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
7899     if ( result < (int) stream_.bufferSize ) {
7900       // Either an error or overrun occured.
// -EPIPE signals an xrun; re-prepare the PCM so capture can continue.
7901       if ( result == -EPIPE ) {
7902         snd_pcm_state_t state = snd_pcm_state( handle[1] );
7903         if ( state == SND_PCM_STATE_XRUN ) {
7904           apiInfo->xrun[1] = true;
7905           result = snd_pcm_prepare( handle[1] );
7907             errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7908             errorText_ = errorStream_.str();
7912           errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7913           errorText_ = errorStream_.str();
7917       errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7918       errorText_ = errorStream_.str();
// Read problems are reported as warnings, not fatal errors.
7920       error( RtAudioError::WARNING );
7924     // Do byte swapping if necessary.
7925     if ( stream_.doByteSwap[1] )
7926       byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
7928     // Do buffer conversion if necessary.
7929     if ( stream_.doConvertBuffer[1] )
7930       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7932     // Check stream latency
7933     result = snd_pcm_delay( handle[1], &frames );
7934     if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side: convert (if needed), byte-swap, then write. ----
7939   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7941     // Setup parameters and do buffer conversion if necessary.
7942     if ( stream_.doConvertBuffer[0] ) {
7943       buffer = stream_.deviceBuffer;
7944       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
7945       channels = stream_.nDeviceChannels[0];
7946       format = stream_.deviceFormat[0];
7949       buffer = stream_.userBuffer[0];
7950       channels = stream_.nUserChannels[0];
7951       format = stream_.userFormat;
7954     // Do byte swapping if necessary.
7955     if ( stream_.doByteSwap[0] )
7956       byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
7958     // Write samples to device in interleaved/non-interleaved format.
7959     if ( stream_.deviceInterleaved[0] )
7960       result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, as on the capture side.
7962       void *bufs[channels];
7963       size_t offset = stream_.bufferSize * formatBytes( format );
7964       for ( int i=0; i<channels; i++ )
7965         bufs[i] = (void *) (buffer + (i * offset));
7966       result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
7969     if ( result < (int) stream_.bufferSize ) {
7970       // Either an error or underrun occured.
7971       if ( result == -EPIPE ) {
7972         snd_pcm_state_t state = snd_pcm_state( handle[0] );
7973         if ( state == SND_PCM_STATE_XRUN ) {
7974           apiInfo->xrun[0] = true;
7975           result = snd_pcm_prepare( handle[0] );
7977             errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
7978             errorText_ = errorStream_.str();
7981             errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
7984           errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7985           errorText_ = errorStream_.str();
7989       errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
7990       errorText_ = errorStream_.str();
7992       error( RtAudioError::WARNING );
7996     // Check stream latency
7997     result = snd_pcm_delay( handle[0], &frames );
7998     if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8002   MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a deferred stop request from the callback.
8004   RtApi::tickStreamTime();
8005   if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops calling
// RtApiAlsa::callbackEvent() until CallbackInfo::isRunning is cleared.
// `ptr` is the CallbackInfo installed when the stream was opened.
8008 static void *alsaCallbackHandler( void *ptr )
8010   CallbackInfo *info = (CallbackInfo *) ptr;
8011   RtApiAlsa *object = (RtApiAlsa *) info->object;
8012   bool *isRunning = &info->isRunning;
8014 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime (SCHED_RR) scheduling actually
// took effect for this thread.
8015   if ( info->doRealtime ) {
8016     std::cerr << "RtAudio alsa: " <<
8017              (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8018              "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each cycle so the
// thread can be cancelled while isRunning is still true.
8022   while ( *isRunning == true ) {
8023     pthread_testcancel();
8024     object->callbackEvent();
8027   pthread_exit( NULL );
8030 //******************** End of __LINUX_ALSA__ *********************//
8033 #if defined(__LINUX_PULSE__)
8035 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8036 // and Tristan Matthews.
8038 #include <pulse/error.h>
8039 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; zero-terminated so the
// probing loops can iterate without a count.
8042 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8043                                                       44100, 48000, 96000, 0};
// Maps RtAudio sample formats to PulseAudio sample formats.
8045 struct rtaudio_pa_format_mapping_t {
8046   RtAudioFormat rtaudio_format;
8047   pa_sample_format_t pa_format;
// Table of natively supported format pairs, terminated by the
// {0, PA_SAMPLE_INVALID} sentinel; unlisted formats are converted.
8050 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8051   {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8052   {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8053   {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8054   {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend.
// NOTE(review): partial dump — member declarations for s_play, s_rec,
// runnable and the thread handle sit on lines missing from view.
8056 struct PulseAudioHandle {
8060   pthread_cond_t runnable_cv;
8062   PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: close the stream if the user did not do so explicitly.
// NOTE(review): the closeStream() call itself is on a line missing from
// this partial dump.
8065 RtApiPulse::~RtApiPulse()
8067   if ( stream_.state != STREAM_CLOSED )
// The PulseAudio backend exposes a single virtual device (the server);
// the function body is on lines missing from this partial dump.
8071 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single virtual PulseAudio device. The device index is
// ignored (PulseAudio handles routing itself); capabilities are fixed:
// stereo in/out/duplex, the rates in SUPPORTED_SAMPLERATES, and the three
// natively supported sample formats.
8076 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8078   RtAudio::DeviceInfo info;
8080   info.name = "PulseAudio";
8081   info.outputChannels = 2;
8082   info.inputChannels = 2;
8083   info.duplexChannels = 2;
8084   info.isDefaultOutput = true;
8085   info.isDefaultInput = true;
// Copy the zero-terminated rate table into the info structure.
8087   for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8088     info.sampleRates.push_back( *sr );
8090   info.preferredSampleRate = 48000;
8091   info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loops calling
// RtApiPulse::callbackEvent() until CallbackInfo::isRunning is cleared.
8096 static void *pulseaudio_callback( void * user )
8098   CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8099   RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8100   volatile bool *isRunning = &cbi->isRunning;
8102 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether SCHED_RR scheduling took effect.
8103   if (cbi->doRealtime) {
8104     std::cerr << "RtAudio pulse: " <<
8105              (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8106              "running realtime scheduling" << std::endl;
// Cancellation point each cycle so closeStream() can cancel the thread.
8110   while ( *isRunning ) {
8111     pthread_testcancel();
8112     context->callbackEvent();
8115   pthread_exit( NULL );
// Tear down the PulseAudio stream: stop the callback thread, join it,
// release the simple-API connections, free the handle and user buffers,
// and reset the stream state to CLOSED.
8118 void RtApiPulse::closeStream( void )
8120   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Tell the callback thread's loop to exit ...
8122   stream_.callbackInfo.isRunning = false;
// ... and wake it if it is parked waiting on runnable_cv.
8124   MUTEX_LOCK( &stream_.mutex );
8125   if ( stream_.state == STREAM_STOPPED ) {
8126     pah->runnable = true;
8127     pthread_cond_signal( &pah->runnable_cv );
8129   MUTEX_UNLOCK( &stream_.mutex );
8131   pthread_join( pah->thread, 0 );
// Flush unwritten playback data before freeing the connection.
8132   if ( pah->s_play ) {
8133     pa_simple_flush( pah->s_play, NULL );
8134     pa_simple_free( pah->s_play );
8137     pa_simple_free( pah->s_rec );
8139   pthread_cond_destroy( &pah->runnable_cv );
8141   stream_.apiHandle = 0;
// Release user-side audio buffers for both directions.
8144   if ( stream_.userBuffer[0] ) {
8145     free( stream_.userBuffer[0] );
8146     stream_.userBuffer[0] = 0;
8148   if ( stream_.userBuffer[1] ) {
8149     free( stream_.userBuffer[1] );
8150     stream_.userBuffer[1] = 0;
8153   stream_.state = STREAM_CLOSED;
8154   stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio audio cycle: wait while stopped, invoke
// the user callback, then push playback data with pa_simple_write() and/or
// pull capture data with pa_simple_read().
// NOTE(review): partial dump — the declarations of `bytes` and `pa_error`
// and several braces/returns sit on lines missing from view.
8157 void RtApiPulse::callbackEvent( void )
8159   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, park on runnable_cv until startStream() signals.
8161   if ( stream_.state == STREAM_STOPPED ) {
8162     MUTEX_LOCK( &stream_.mutex );
8163     while ( !pah->runnable )
8164       pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8166     if ( stream_.state != STREAM_RUNNING ) {
8167       MUTEX_UNLOCK( &stream_.mutex );
8170     MUTEX_UNLOCK( &stream_.mutex );
8173   if ( stream_.state == STREAM_CLOSED ) {
8174     errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8175       "this shouldn't happen!";
8176     error( RtAudioError::WARNING );
// Invoke the user callback; 1 = stop after this cycle, 2 = abort now.
8180   RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8181   double streamTime = getStreamTime();
8182   RtAudioStreamStatus status = 0;
8183   int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8184                                stream_.bufferSize, streamTime, status,
8185                                stream_.callbackInfo.userData );
8187   if ( doStopStream == 2 ) {
8192   MUTEX_LOCK( &stream_.mutex );
// Choose device-format staging buffers when conversion is required,
// otherwise talk to PulseAudio with the user buffers directly.
8193   void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8194   void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
// Re-check after acquiring the mutex: the state may have changed.
8196   if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert if needed, then blocking write to the server. ----
8201   if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8202     if ( stream_.doConvertBuffer[OUTPUT] ) {
8203       convertBuffer( stream_.deviceBuffer,
8204                      stream_.userBuffer[OUTPUT],
8205                      stream_.convertInfo[OUTPUT] );
8206       bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8207         formatBytes( stream_.deviceFormat[OUTPUT] );
8209       bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8210         formatBytes( stream_.userFormat );
8212     if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8213       errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8214         pa_strerror( pa_error ) << ".";
8215       errorText_ = errorStream_.str();
8216       error( RtAudioError::WARNING );
// ---- Capture: blocking read from the server, then convert if needed. ----
8220   if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8221     if ( stream_.doConvertBuffer[INPUT] )
8222       bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8223         formatBytes( stream_.deviceFormat[INPUT] );
8225       bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8226         formatBytes( stream_.userFormat );
8228     if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8229       errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8230         pa_strerror( pa_error ) << ".";
8231       errorText_ = errorStream_.str();
8232       error( RtAudioError::WARNING );
8234     if ( stream_.doConvertBuffer[INPUT] ) {
8235       convertBuffer( stream_.userBuffer[INPUT],
8236                      stream_.deviceBuffer,
8237                      stream_.convertInfo[INPUT] );
8242   MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a deferred stop request.
8243   RtApi::tickStreamTime();
8245   if ( doStopStream == 1 )
// Start (or resume) the PulseAudio stream: mark it RUNNING and wake the
// parked callback thread via runnable_cv.
8249 void RtApiPulse::startStream( void )
8251   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8253   if ( stream_.state == STREAM_CLOSED ) {
8254     errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8255     error( RtAudioError::INVALID_USE );
8258   if ( stream_.state == STREAM_RUNNING ) {
8259     errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8260     error( RtAudioError::WARNING );
// State change and wake-up are done under the stream mutex so the callback
// thread cannot miss the signal.
8264   MUTEX_LOCK( &stream_.mutex );
8266   stream_.state = STREAM_RUNNING;
8268   pah->runnable = true;
8269   pthread_cond_signal( &pah->runnable_cv );
8270   MUTEX_UNLOCK( &stream_.mutex );
// Stop the PulseAudio stream, draining (playing out) any buffered output
// before returning. Compare abortStream(), which flushes instead.
8273 void RtApiPulse::stopStream( void )
8275   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8277   if ( stream_.state == STREAM_CLOSED ) {
8278     errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8279     error( RtAudioError::INVALID_USE );
8282   if ( stream_.state == STREAM_STOPPED ) {
8283     errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8284     error( RtAudioError::WARNING );
8288   stream_.state = STREAM_STOPPED;
8289   MUTEX_LOCK( &stream_.mutex );
// Drain blocks until the server has played all queued audio.
8291   if ( pah && pah->s_play ) {
8293     if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8294       errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8295         pa_strerror( pa_error ) << ".";
8296       errorText_ = errorStream_.str();
// Unlock before raising so the error path does not hold the mutex.
8297       MUTEX_UNLOCK( &stream_.mutex );
8298       error( RtAudioError::SYSTEM_ERROR );
8303   stream_.state = STREAM_STOPPED;
8304   MUTEX_UNLOCK( &stream_.mutex );
// Abort the PulseAudio stream immediately: discard (flush) any buffered
// output rather than draining it as stopStream() does.
8307 void RtApiPulse::abortStream( void )
8309   PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8311   if ( stream_.state == STREAM_CLOSED ) {
8312     errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8313     error( RtAudioError::INVALID_USE );
8316   if ( stream_.state == STREAM_STOPPED ) {
8317     errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8318     error( RtAudioError::WARNING );
8322   stream_.state = STREAM_STOPPED;
8323   MUTEX_LOCK( &stream_.mutex );
// Flush discards queued playback data without waiting.
8325   if ( pah && pah->s_play ) {
8327     if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8328       errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8329         pa_strerror( pa_error ) << ".";
8330       errorText_ = errorStream_.str();
// Unlock before raising so the error path does not hold the mutex.
8331       MUTEX_UNLOCK( &stream_.mutex );
8332       error( RtAudioError::SYSTEM_ERROR );
8337   stream_.state = STREAM_STOPPED;
8338   MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the single PulseAudio device:
// validate parameters, build the pa_sample_spec, allocate user/device
// buffers, connect via the simple API, and spawn the callback thread on
// the first open. Returns true on success, false on failure.
// NOTE(review): partial dump — `ss` (pa_sample_spec), `sf_found`, `error`
// and `flags` are declared/used on lines missing from view, as are several
// `goto error` statements and closing braces.
8341 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8342                                   unsigned int channels, unsigned int firstChannel,
8343                                   unsigned int sampleRate, RtAudioFormat format,
8344                                   unsigned int *bufferSize, RtAudio::StreamOptions *options )
8346   PulseAudioHandle *pah = 0;
8347   unsigned long bufferBytes = 0;
// Only device 0 exists; only mono/stereo with no channel offset.
8350   if ( device != 0 ) return false;
8351   if ( mode != INPUT && mode != OUTPUT ) return false;
8352   if ( channels != 1 && channels != 2 ) {
8353     errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8356   ss.channels = channels;
8358   if ( firstChannel != 0 ) return false;
// Accept only the rates in the zero-terminated SUPPORTED_SAMPLERATES table.
8360   bool sr_found = false;
8361   for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8362     if ( sampleRate == *sr ) {
8364       stream_.sampleRate = sampleRate;
8365       ss.rate = sampleRate;
8370     errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Use the requested format natively if the mapping table supports it ...
8375   for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8376         sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8377     if ( format == sf->rtaudio_format ) {
8379       stream_.userFormat = sf->rtaudio_format;
8380       stream_.deviceFormat[mode] = stream_.userFormat;
8381       ss.format = sf->pa_format;
// ... otherwise run the device side in float32 and convert internally.
8385   if ( !sf_found ) { // Use internal data format conversion.
8386     stream_.userFormat = format;
8387     stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8388     ss.format = PA_SAMPLE_FLOAT32LE;
8391   // Set other stream parameters.
8392   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8393   else stream_.userInterleaved = true;
8394   stream_.deviceInterleaved[mode] = true;
8395   stream_.nBuffers = 1;
8396   stream_.doByteSwap[mode] = false;
8397   stream_.nUserChannels[mode] = channels;
// firstChannel is known to be 0 here (checked above), so this equals channels.
8398   stream_.nDeviceChannels[mode] = channels + firstChannel;
8399   stream_.channelOffset[mode] = 0;
8400   std::string streamName = "RtAudio";
8402   // Set flags for buffer conversion.
8403   stream_.doConvertBuffer[mode] = false;
8404   if ( stream_.userFormat != stream_.deviceFormat[mode] )
8405     stream_.doConvertBuffer[mode] = true;
8406   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8407     stream_.doConvertBuffer[mode] = true;
8409   // Allocate necessary internal buffers.
8410   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8411   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8412   if ( stream_.userBuffer[mode] == NULL ) {
8413     errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8416   stream_.bufferSize = *bufferSize;
// Device-format staging buffer, shared between directions in duplex mode
// when the existing buffer is already large enough.
8418   if ( stream_.doConvertBuffer[mode] ) {
8420     bool makeBuffer = true;
8421     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8422     if ( mode == INPUT ) {
8423       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8424         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8425         if ( bufferBytes <= bytesOut ) makeBuffer = false;
8430       bufferBytes *= *bufferSize;
8431       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8432       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8433       if ( stream_.deviceBuffer == NULL ) {
8434         errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8440   stream_.device[mode] = device;
8442   // Setup the buffer conversion information structure.
8443   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// First open: create the handle and its condition variable.
// NOTE(review): this inner `pah` shadows the outer one declared at 8346;
// the outer pointer is re-fetched from stream_.apiHandle at 8458, so the
// code works, but the shadowing is error-prone.
8445   if ( !stream_.apiHandle ) {
8446     PulseAudioHandle *pah = new PulseAudioHandle;
8448       errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8452     stream_.apiHandle = pah;
8453     if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8454       errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8458   pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8461   if ( options && !options->streamName.empty() ) streamName = options->streamName;
// NOTE(review): buffer_attr is only passed for the record stream (8468);
// the playback stream is opened with default buffer attributes (8475).
8464     pa_buffer_attr buffer_attr;
8465     buffer_attr.fragsize = bufferBytes;
8466     buffer_attr.maxlength = -1;
8468     pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8469     if ( !pah->s_rec ) {
8470       errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8475     pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8476     if ( !pah->s_play ) {
8477       errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track the combined mode: second direction on an open stream => DUPLEX.
8485   if ( stream_.mode == UNINITIALIZED )
8486     stream_.mode = mode;
8487   else if ( stream_.mode == mode )
8490     stream_.mode = DUPLEX;
// Spawn the callback thread once, optionally with SCHED_RR realtime
// scheduling when RTAUDIO_SCHEDULE_REALTIME was requested.
8492   if ( !stream_.callbackInfo.isRunning ) {
8493     stream_.callbackInfo.object = this;
8495     stream_.state = STREAM_STOPPED;
8496     // Set the thread attributes for joinable and realtime scheduling
8497     // priority (optional).  The higher priority will only take affect
8498     // if the program is run as root or suid. Note, under Linux
8499     // processes with CAP_SYS_NICE privilege, a user can change
8500     // scheduling policy and priority (thus need not be root). See
8501     // POSIX "capabilities".
8502     pthread_attr_t attr;
8503     pthread_attr_init( &attr );
8504     pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8505 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8506     if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8507       stream_.callbackInfo.doRealtime = true;
8508       struct sched_param param;
8509       int priority = options->priority;
8510       int min = sched_get_priority_min( SCHED_RR );
8511       int max = sched_get_priority_max( SCHED_RR );
// Clamp the requested priority into the valid SCHED_RR range.
8512       if ( priority < min ) priority = min;
8513       else if ( priority > max ) priority = max;
8514       param.sched_priority = priority;
8516       // Set the policy BEFORE the priority. Otherwise it fails.
8517       pthread_attr_setschedpolicy(&attr, SCHED_RR);
8518       pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8519       // This is definitely required. Otherwise it fails.
8520       pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8521       pthread_attr_setschedparam(&attr, &param);
8524       pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8526     pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8529     stream_.callbackInfo.isRunning = true;
8530     int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8531     pthread_attr_destroy(&attr);
// If realtime attributes were rejected, retry with default attributes.
8533       // Failed. Try instead with default attributes.
8534       result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8536         stream_.callbackInfo.isRunning = false;
8537         errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// ---- Error cleanup path: release handle, buffers, reset state. ----
8546   if ( pah && stream_.callbackInfo.isRunning ) {
8547     pthread_cond_destroy( &pah->runnable_cv );
8549     stream_.apiHandle = 0;
8552   for ( int i=0; i<2; i++ ) {
8553     if ( stream_.userBuffer[i] ) {
8554       free( stream_.userBuffer[i] );
8555       stream_.userBuffer[i] = 0;
8559   if ( stream_.deviceBuffer ) {
8560     free( stream_.deviceBuffer );
8561     stream_.deviceBuffer = 0;
8564   stream_.state = STREAM_CLOSED;
8568 //******************** End of __LINUX_PULSE__ *********************//
8571 #if defined(__LINUX_OSS__)
8574 #include <sys/ioctl.h>
8577 #include <sys/soundcard.h>
8581 static void *ossCallbackHandler(void * ptr);
8583 // A structure to hold various information related to the OSS API
// Per-stream OSS state: file descriptors for playback/capture and a
// condition variable the callback thread waits on.
// NOTE(review): partial dump — the struct keyword/brace, the xrun[2] and
// triggered members sit on lines missing from view.
8586   int id[2];    // device ids
8589   pthread_cond_t runnable;
8592     :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor: all stream state is initialized by the base class.
8595 RtApiOss :: RtApiOss()
8597   // Nothing to do here.
// Destructor: close the stream if the user did not do so explicitly.
8600 RtApiOss :: ~RtApiOss()
8602   if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count OSS audio devices by querying the mixer with SNDCTL_SYSINFO
// (requires OSS >= 4.0). Failures are reported as warnings; the early
// return value on error is on a line missing from this partial dump.
8605 unsigned int RtApiOss :: getDeviceCount( void )
8607   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8608   if ( mixerfd == -1 ) {
8609     errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8610     error( RtAudioError::WARNING );
8614   oss_sysinfo sysinfo;
8615   if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8617     errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8618     error( RtAudioError::WARNING );
// NOTE(review): the close( mixerfd ) calls appear to be on lines missing
// from view (gaps at 8611-8613, 8616, 8619-8622).
8623   return sysinfo.numaudios;
// Probe one OSS device via the mixer's SNDCTL_AUDIOINFO ioctl: channel
// capabilities, native data formats (from the input-format mask), and
// supported sample rates (explicit rate list or a min/max range).
// NOTE(review): partial dump — close( mixerfd ) calls, early returns and
// `info.probed = true` are on lines missing from view.
8626 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8628   RtAudio::DeviceInfo info;
8629   info.probed = false;
8631   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8632   if ( mixerfd == -1 ) {
8633     errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8634     error( RtAudioError::WARNING );
8638   oss_sysinfo sysinfo;
8639   int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8640   if ( result == -1 ) {
8642     errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8643     error( RtAudioError::WARNING );
8647   unsigned nDevices = sysinfo.numaudios;
8648   if ( nDevices == 0 ) {
8650     errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8651     error( RtAudioError::INVALID_USE );
8655   if ( device >= nDevices ) {
8657     errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8658     error( RtAudioError::INVALID_USE );
// Query the audio-engine info for this device index (ainfo.dev is set on
// a line missing from view, gap at 8663).
8662   oss_audioinfo ainfo;
8664   result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8666   if ( result == -1 ) {
8667     errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8668     errorText_ = errorStream_.str();
8669     error( RtAudioError::WARNING );
// Channel capabilities from the capability bit mask.
8674   if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8675   if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8676   if ( ainfo.caps & PCM_CAP_DUPLEX ) {
// NOTE(review): the PCM_CAP_DUPLEX test here is redundant — the enclosing
// `if` already checked it.
8677     if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8678       info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8681   // Probe data formats ... do for input
8682   unsigned long mask = ainfo.iformats;
8683   if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8684     info.nativeFormats |= RTAUDIO_SINT16;
8685   if ( mask & AFMT_S8 )
8686     info.nativeFormats |= RTAUDIO_SINT8;
8687   if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8688     info.nativeFormats |= RTAUDIO_SINT32;
8690   if ( mask & AFMT_FLOAT )
8691     info.nativeFormats |= RTAUDIO_FLOAT32;
8693   if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8694     info.nativeFormats |= RTAUDIO_SINT24;
8696   // Check that we have at least one supported format
8697   if ( info.nativeFormats == 0 ) {
8698     errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8699     errorText_ = errorStream_.str();
8700     error( RtAudioError::WARNING );
8704   // Probe the supported sample rates.
8705   info.sampleRates.clear();
// Preferred rate heuristic: highest supported rate not exceeding 48 kHz.
8706   if ( ainfo.nrates ) {
8707     for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8708       for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8709         if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8710           info.sampleRates.push_back( SAMPLE_RATES[k] );
8712           if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8713             info.preferredSampleRate = SAMPLE_RATES[k];
// No explicit rate list: accept every standard rate within [min, max].
8721     // Check min and max rate values;
8722     for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8723       if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8724         info.sampleRates.push_back( SAMPLE_RATES[k] );
8726         if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8727           info.preferredSampleRate = SAMPLE_RATES[k];
8732   if ( info.sampleRates.size() == 0 ) {
8733     errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8734     errorText_ = errorStream_.str();
8735     error( RtAudioError::WARNING );
8739     info.name = ainfo.name;
8746 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8747 unsigned int firstChannel, unsigned int sampleRate,
8748 RtAudioFormat format, unsigned int *bufferSize,
8749 RtAudio::StreamOptions *options )
8751 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8752 if ( mixerfd == -1 ) {
8753 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8757 oss_sysinfo sysinfo;
8758 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8759 if ( result == -1 ) {
8761 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8765 unsigned nDevices = sysinfo.numaudios;
8766 if ( nDevices == 0 ) {
8767 // This should not happen because a check is made before this function is called.
8769 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8773 if ( device >= nDevices ) {
8774 // This should not happen because a check is made before this function is called.
8776 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8780 oss_audioinfo ainfo;
8782 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8784 if ( result == -1 ) {
8785 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8786 errorText_ = errorStream_.str();
8790 // Check if device supports input or output
8791 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8792 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8793 if ( mode == OUTPUT )
8794 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8796 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8797 errorText_ = errorStream_.str();
8802 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8803 if ( mode == OUTPUT )
8805 else { // mode == INPUT
8806 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8807 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8808 close( handle->id[0] );
8810 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8811 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8812 errorText_ = errorStream_.str();
8815 // Check that the number previously set channels is the same.
8816 if ( stream_.nUserChannels[0] != channels ) {
8817 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8818 errorText_ = errorStream_.str();
8827 // Set exclusive access if specified.
8828 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8830 // Try to open the device.
8832 fd = open( ainfo.devnode, flags, 0 );
8834 if ( errno == EBUSY )
8835 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8837 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8838 errorText_ = errorStream_.str();
8842 // For duplex operation, specifically set this mode (this doesn't seem to work).
8844 if ( flags | O_RDWR ) {
8845 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8846 if ( result == -1) {
8847 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8848 errorText_ = errorStream_.str();
8854 // Check the device channel support.
8855 stream_.nUserChannels[mode] = channels;
8856 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8858 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8859 errorText_ = errorStream_.str();
8863 // Set the number of channels.
8864 int deviceChannels = channels + firstChannel;
8865 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8866 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8868 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8869 errorText_ = errorStream_.str();
8872 stream_.nDeviceChannels[mode] = deviceChannels;
8874 // Get the data format mask
8876 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8877 if ( result == -1 ) {
8879 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8880 errorText_ = errorStream_.str();
8884 // Determine how to set the device format.
8885 stream_.userFormat = format;
8886 int deviceFormat = -1;
8887 stream_.doByteSwap[mode] = false;
8888 if ( format == RTAUDIO_SINT8 ) {
8889 if ( mask & AFMT_S8 ) {
8890 deviceFormat = AFMT_S8;
8891 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8894 else if ( format == RTAUDIO_SINT16 ) {
8895 if ( mask & AFMT_S16_NE ) {
8896 deviceFormat = AFMT_S16_NE;
8897 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8899 else if ( mask & AFMT_S16_OE ) {
8900 deviceFormat = AFMT_S16_OE;
8901 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8902 stream_.doByteSwap[mode] = true;
8905 else if ( format == RTAUDIO_SINT24 ) {
8906 if ( mask & AFMT_S24_NE ) {
8907 deviceFormat = AFMT_S24_NE;
8908 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8910 else if ( mask & AFMT_S24_OE ) {
8911 deviceFormat = AFMT_S24_OE;
8912 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8913 stream_.doByteSwap[mode] = true;
8916 else if ( format == RTAUDIO_SINT32 ) {
8917 if ( mask & AFMT_S32_NE ) {
8918 deviceFormat = AFMT_S32_NE;
8919 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8921 else if ( mask & AFMT_S32_OE ) {
8922 deviceFormat = AFMT_S32_OE;
8923 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8924 stream_.doByteSwap[mode] = true;
8928 if ( deviceFormat == -1 ) {
8929 // The user requested format is not natively supported by the device.
8930 if ( mask & AFMT_S16_NE ) {
8931 deviceFormat = AFMT_S16_NE;
8932 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8934 else if ( mask & AFMT_S32_NE ) {
8935 deviceFormat = AFMT_S32_NE;
8936 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8938 else if ( mask & AFMT_S24_NE ) {
8939 deviceFormat = AFMT_S24_NE;
8940 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8942 else if ( mask & AFMT_S16_OE ) {
8943 deviceFormat = AFMT_S16_OE;
8944 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8945 stream_.doByteSwap[mode] = true;
8947 else if ( mask & AFMT_S32_OE ) {
8948 deviceFormat = AFMT_S32_OE;
8949 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8950 stream_.doByteSwap[mode] = true;
8952 else if ( mask & AFMT_S24_OE ) {
8953 deviceFormat = AFMT_S24_OE;
8954 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8955 stream_.doByteSwap[mode] = true;
8957 else if ( mask & AFMT_S8) {
8958 deviceFormat = AFMT_S8;
8959 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8963 if ( stream_.deviceFormat[mode] == 0 ) {
8964 // This really shouldn't happen ...
8966 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
8967 errorText_ = errorStream_.str();
8971 // Set the data format.
8972 int temp = deviceFormat;
8973 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
8974 if ( result == -1 || deviceFormat != temp ) {
8976 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
8977 errorText_ = errorStream_.str();
8981 // Attempt to set the buffer size. According to OSS, the minimum
8982 // number of buffers is two. The supposed minimum buffer size is 16
8983 // bytes, so that will be our lower bound. The argument to this
8984 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
8985 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
8986 // We'll check the actual value used near the end of the setup
8988 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
8989 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
8991 if ( options ) buffers = options->numberOfBuffers;
8992 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
8993 if ( buffers < 2 ) buffers = 3;
8994 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
8995 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
8996 if ( result == -1 ) {
8998 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
8999 errorText_ = errorStream_.str();
9002 stream_.nBuffers = buffers;
9004 // Save buffer size (in sample frames).
9005 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9006 stream_.bufferSize = *bufferSize;
9008 // Set the sample rate.
9009 int srate = sampleRate;
9010 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9011 if ( result == -1 ) {
9013 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9014 errorText_ = errorStream_.str();
9018 // Verify the sample rate setup worked.
9019 if ( abs( srate - (int)sampleRate ) > 100 ) {
9021 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9022 errorText_ = errorStream_.str();
9025 stream_.sampleRate = sampleRate;
9027 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9028 // We're doing duplex setup here.
9029 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9030 stream_.nDeviceChannels[0] = deviceChannels;
9033 // Set interleaving parameters.
9034 stream_.userInterleaved = true;
9035 stream_.deviceInterleaved[mode] = true;
9036 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9037 stream_.userInterleaved = false;
9039 // Set flags for buffer conversion
9040 stream_.doConvertBuffer[mode] = false;
9041 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9042 stream_.doConvertBuffer[mode] = true;
9043 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9044 stream_.doConvertBuffer[mode] = true;
9045 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9046 stream_.nUserChannels[mode] > 1 )
9047 stream_.doConvertBuffer[mode] = true;
9049 // Allocate the stream handles if necessary and then save.
9050 if ( stream_.apiHandle == 0 ) {
9052 handle = new OssHandle;
9054 catch ( std::bad_alloc& ) {
9055 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9059 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9060 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9064 stream_.apiHandle = (void *) handle;
9067 handle = (OssHandle *) stream_.apiHandle;
9069 handle->id[mode] = fd;
9071 // Allocate necessary internal buffers.
9072 unsigned long bufferBytes;
9073 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9074 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9075 if ( stream_.userBuffer[mode] == NULL ) {
9076 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9080 if ( stream_.doConvertBuffer[mode] ) {
9082 bool makeBuffer = true;
9083 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9084 if ( mode == INPUT ) {
9085 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9086 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9087 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9092 bufferBytes *= *bufferSize;
9093 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9094 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9095 if ( stream_.deviceBuffer == NULL ) {
9096 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9102 stream_.device[mode] = device;
9103 stream_.state = STREAM_STOPPED;
9105 // Setup the buffer conversion information structure.
9106 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9108 // Setup thread if necessary.
9109 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9110 // We had already set up an output stream.
9111 stream_.mode = DUPLEX;
9112 if ( stream_.device[0] == device ) handle->id[0] = fd;
9115 stream_.mode = mode;
9117 // Setup callback thread.
9118 stream_.callbackInfo.object = (void *) this;
9120 // Set the thread attributes for joinable and realtime scheduling
9121 // priority. The higher priority will only take affect if the
9122 // program is run as root or suid.
9123 pthread_attr_t attr;
9124 pthread_attr_init( &attr );
9125 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9126 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9127 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9128 stream_.callbackInfo.doRealtime = true;
9129 struct sched_param param;
9130 int priority = options->priority;
9131 int min = sched_get_priority_min( SCHED_RR );
9132 int max = sched_get_priority_max( SCHED_RR );
9133 if ( priority < min ) priority = min;
9134 else if ( priority > max ) priority = max;
9135 param.sched_priority = priority;
9137 // Set the policy BEFORE the priority. Otherwise it fails.
9138 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9139 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9140 // This is definitely required. Otherwise it fails.
9141 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9142 pthread_attr_setschedparam(&attr, ¶m);
9145 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9147 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9150 stream_.callbackInfo.isRunning = true;
9151 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9152 pthread_attr_destroy( &attr );
9154 // Failed. Try instead with default attributes.
9155 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9157 stream_.callbackInfo.isRunning = false;
9158 errorText_ = "RtApiOss::error creating callback thread!";
9168 pthread_cond_destroy( &handle->runnable );
9169 if ( handle->id[0] ) close( handle->id[0] );
9170 if ( handle->id[1] ) close( handle->id[1] );
9172 stream_.apiHandle = 0;
9175 for ( int i=0; i<2; i++ ) {
9176 if ( stream_.userBuffer[i] ) {
9177 free( stream_.userBuffer[i] );
9178 stream_.userBuffer[i] = 0;
9182 if ( stream_.deviceBuffer ) {
9183 free( stream_.deviceBuffer );
9184 stream_.deviceBuffer = 0;
9187 stream_.state = STREAM_CLOSED;
9191 void RtApiOss :: closeStream()
9193 if ( stream_.state == STREAM_CLOSED ) {
9194 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9195 error( RtAudioError::WARNING );
9199 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9200 stream_.callbackInfo.isRunning = false;
9201 MUTEX_LOCK( &stream_.mutex );
9202 if ( stream_.state == STREAM_STOPPED )
9203 pthread_cond_signal( &handle->runnable );
9204 MUTEX_UNLOCK( &stream_.mutex );
9205 pthread_join( stream_.callbackInfo.thread, NULL );
9207 if ( stream_.state == STREAM_RUNNING ) {
9208 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9209 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9211 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9212 stream_.state = STREAM_STOPPED;
9216 pthread_cond_destroy( &handle->runnable );
9217 if ( handle->id[0] ) close( handle->id[0] );
9218 if ( handle->id[1] ) close( handle->id[1] );
9220 stream_.apiHandle = 0;
9223 for ( int i=0; i<2; i++ ) {
9224 if ( stream_.userBuffer[i] ) {
9225 free( stream_.userBuffer[i] );
9226 stream_.userBuffer[i] = 0;
9230 if ( stream_.deviceBuffer ) {
9231 free( stream_.deviceBuffer );
9232 stream_.deviceBuffer = 0;
9235 stream_.mode = UNINITIALIZED;
9236 stream_.state = STREAM_CLOSED;
9239 void RtApiOss :: startStream()
9242 if ( stream_.state == STREAM_RUNNING ) {
9243 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9244 error( RtAudioError::WARNING );
9248 MUTEX_LOCK( &stream_.mutex );
9250 stream_.state = STREAM_RUNNING;
9252 // No need to do anything else here ... OSS automatically starts
9253 // when fed samples.
9255 MUTEX_UNLOCK( &stream_.mutex );
9257 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9258 pthread_cond_signal( &handle->runnable );
9261 void RtApiOss :: stopStream()
9264 if ( stream_.state == STREAM_STOPPED ) {
9265 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9266 error( RtAudioError::WARNING );
9270 MUTEX_LOCK( &stream_.mutex );
9272 // The state might change while waiting on a mutex.
9273 if ( stream_.state == STREAM_STOPPED ) {
9274 MUTEX_UNLOCK( &stream_.mutex );
9279 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9280 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9282 // Flush the output with zeros a few times.
9285 RtAudioFormat format;
9287 if ( stream_.doConvertBuffer[0] ) {
9288 buffer = stream_.deviceBuffer;
9289 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9290 format = stream_.deviceFormat[0];
9293 buffer = stream_.userBuffer[0];
9294 samples = stream_.bufferSize * stream_.nUserChannels[0];
9295 format = stream_.userFormat;
9298 memset( buffer, 0, samples * formatBytes(format) );
9299 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9300 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9301 if ( result == -1 ) {
9302 errorText_ = "RtApiOss::stopStream: audio write error.";
9303 error( RtAudioError::WARNING );
9307 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9308 if ( result == -1 ) {
9309 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9310 errorText_ = errorStream_.str();
9313 handle->triggered = false;
9316 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9317 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9318 if ( result == -1 ) {
9319 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9320 errorText_ = errorStream_.str();
9326 stream_.state = STREAM_STOPPED;
9327 MUTEX_UNLOCK( &stream_.mutex );
9329 if ( result != -1 ) return;
9330 error( RtAudioError::SYSTEM_ERROR );
9333 void RtApiOss :: abortStream()
9336 if ( stream_.state == STREAM_STOPPED ) {
9337 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9338 error( RtAudioError::WARNING );
9342 MUTEX_LOCK( &stream_.mutex );
9344 // The state might change while waiting on a mutex.
9345 if ( stream_.state == STREAM_STOPPED ) {
9346 MUTEX_UNLOCK( &stream_.mutex );
9351 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9352 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9353 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9354 if ( result == -1 ) {
9355 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9356 errorText_ = errorStream_.str();
9359 handle->triggered = false;
9362 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9363 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9364 if ( result == -1 ) {
9365 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9366 errorText_ = errorStream_.str();
9372 stream_.state = STREAM_STOPPED;
9373 MUTEX_UNLOCK( &stream_.mutex );
9375 if ( result != -1 ) return;
9376 error( RtAudioError::SYSTEM_ERROR );
9379 void RtApiOss :: callbackEvent()
9381 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9382 if ( stream_.state == STREAM_STOPPED ) {
9383 MUTEX_LOCK( &stream_.mutex );
9384 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9385 if ( stream_.state != STREAM_RUNNING ) {
9386 MUTEX_UNLOCK( &stream_.mutex );
9389 MUTEX_UNLOCK( &stream_.mutex );
9392 if ( stream_.state == STREAM_CLOSED ) {
9393 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9394 error( RtAudioError::WARNING );
9398 // Invoke user callback to get fresh output data.
9399 int doStopStream = 0;
9400 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9401 double streamTime = getStreamTime();
9402 RtAudioStreamStatus status = 0;
9403 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9404 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9405 handle->xrun[0] = false;
9407 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9408 status |= RTAUDIO_INPUT_OVERFLOW;
9409 handle->xrun[1] = false;
9411 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9412 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9413 if ( doStopStream == 2 ) {
9414 this->abortStream();
9418 MUTEX_LOCK( &stream_.mutex );
9420 // The state might change while waiting on a mutex.
9421 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9426 RtAudioFormat format;
9428 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9430 // Setup parameters and do buffer conversion if necessary.
9431 if ( stream_.doConvertBuffer[0] ) {
9432 buffer = stream_.deviceBuffer;
9433 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9434 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9435 format = stream_.deviceFormat[0];
9438 buffer = stream_.userBuffer[0];
9439 samples = stream_.bufferSize * stream_.nUserChannels[0];
9440 format = stream_.userFormat;
9443 // Do byte swapping if necessary.
9444 if ( stream_.doByteSwap[0] )
9445 byteSwapBuffer( buffer, samples, format );
9447 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9449 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9450 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9451 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9452 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9453 handle->triggered = true;
9456 // Write samples to device.
9457 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9459 if ( result == -1 ) {
9460 // We'll assume this is an underrun, though there isn't a
9461 // specific means for determining that.
9462 handle->xrun[0] = true;
9463 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9464 error( RtAudioError::WARNING );
9465 // Continue on to input section.
9469 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9471 // Setup parameters.
9472 if ( stream_.doConvertBuffer[1] ) {
9473 buffer = stream_.deviceBuffer;
9474 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9475 format = stream_.deviceFormat[1];
9478 buffer = stream_.userBuffer[1];
9479 samples = stream_.bufferSize * stream_.nUserChannels[1];
9480 format = stream_.userFormat;
9483 // Read samples from device.
9484 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9486 if ( result == -1 ) {
9487 // We'll assume this is an overrun, though there isn't a
9488 // specific means for determining that.
9489 handle->xrun[1] = true;
9490 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9491 error( RtAudioError::WARNING );
9495 // Do byte swapping if necessary.
9496 if ( stream_.doByteSwap[1] )
9497 byteSwapBuffer( buffer, samples, format );
9499 // Do buffer conversion if necessary.
9500 if ( stream_.doConvertBuffer[1] )
9501 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9505 MUTEX_UNLOCK( &stream_.mutex );
9507 RtApi::tickStreamTime();
9508 if ( doStopStream == 1 ) this->stopStream();
9511 static void *ossCallbackHandler( void *ptr )
9513 CallbackInfo *info = (CallbackInfo *) ptr;
9514 RtApiOss *object = (RtApiOss *) info->object;
9515 bool *isRunning = &info->isRunning;
9517 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9518 if (info->doRealtime) {
9519 std::cerr << "RtAudio oss: " <<
9520 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9521 "running realtime scheduling" << std::endl;
9525 while ( *isRunning == true ) {
9526 pthread_testcancel();
9527 object->callbackEvent();
9530 pthread_exit( NULL );
9533 //******************** End of __LINUX_OSS__ *********************//
9537 // *************************************************** //
9539 // Protected common (OS-independent) RtAudio methods.
9541 // *************************************************** //
9543 // This method can be modified to control the behavior of error
9544 // message printing.
9545 void RtApi :: error( RtAudioError::Type type )
9547 errorStream_.str(""); // clear the ostringstream
9549 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9550 if ( errorCallback ) {
9551 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9553 if ( firstErrorOccurred_ )
9556 firstErrorOccurred_ = true;
9557 const std::string errorMessage = errorText_;
9559 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9560 stream_.callbackInfo.isRunning = false; // exit from the thread
9564 errorCallback( type, errorMessage );
9565 firstErrorOccurred_ = false;
9569 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9570 std::cerr << '\n' << errorText_ << "\n\n";
9571 else if ( type != RtAudioError::WARNING )
9572 throw( RtAudioError( errorText_, type ) );
9575 void RtApi :: verifyStream()
9577 if ( stream_.state == STREAM_CLOSED ) {
9578 errorText_ = "RtApi:: a stream is not open!";
9579 error( RtAudioError::INVALID_USE );
9583 void RtApi :: clearStreamInfo()
9585 stream_.mode = UNINITIALIZED;
9586 stream_.state = STREAM_CLOSED;
9587 stream_.sampleRate = 0;
9588 stream_.bufferSize = 0;
9589 stream_.nBuffers = 0;
9590 stream_.userFormat = 0;
9591 stream_.userInterleaved = true;
9592 stream_.streamTime = 0.0;
9593 stream_.apiHandle = 0;
9594 stream_.deviceBuffer = 0;
9595 stream_.callbackInfo.callback = 0;
9596 stream_.callbackInfo.userData = 0;
9597 stream_.callbackInfo.isRunning = false;
9598 stream_.callbackInfo.errorCallback = 0;
9599 for ( int i=0; i<2; i++ ) {
9600 stream_.device[i] = 11111;
9601 stream_.doConvertBuffer[i] = false;
9602 stream_.deviceInterleaved[i] = true;
9603 stream_.doByteSwap[i] = false;
9604 stream_.nUserChannels[i] = 0;
9605 stream_.nDeviceChannels[i] = 0;
9606 stream_.channelOffset[i] = 0;
9607 stream_.deviceFormat[i] = 0;
9608 stream_.latency[i] = 0;
9609 stream_.userBuffer[i] = 0;
9610 stream_.convertInfo[i].channels = 0;
9611 stream_.convertInfo[i].inJump = 0;
9612 stream_.convertInfo[i].outJump = 0;
9613 stream_.convertInfo[i].inFormat = 0;
9614 stream_.convertInfo[i].outFormat = 0;
9615 stream_.convertInfo[i].inOffset.clear();
9616 stream_.convertInfo[i].outOffset.clear();
9620 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9622 if ( format == RTAUDIO_SINT16 )
9624 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9626 else if ( format == RTAUDIO_FLOAT64 )
9628 else if ( format == RTAUDIO_SINT24 )
9630 else if ( format == RTAUDIO_SINT8 )
9633 errorText_ = "RtApi::formatBytes: undefined format.";
9634 error( RtAudioError::WARNING );
9639 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9641 if ( mode == INPUT ) { // convert device to user buffer
9642 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9643 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9644 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9645 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9647 else { // convert user to device buffer
9648 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9649 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9650 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9651 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9654 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9655 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9657 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9659 // Set up the interleave/deinterleave offsets.
9660 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9661 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9662 ( mode == INPUT && stream_.userInterleaved ) ) {
9663 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9664 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9665 stream_.convertInfo[mode].outOffset.push_back( k );
9666 stream_.convertInfo[mode].inJump = 1;
9670 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9671 stream_.convertInfo[mode].inOffset.push_back( k );
9672 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9673 stream_.convertInfo[mode].outJump = 1;
9677 else { // no (de)interleaving
9678 if ( stream_.userInterleaved ) {
9679 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9680 stream_.convertInfo[mode].inOffset.push_back( k );
9681 stream_.convertInfo[mode].outOffset.push_back( k );
9685 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9686 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9687 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9688 stream_.convertInfo[mode].inJump = 1;
9689 stream_.convertInfo[mode].outJump = 1;
9694 // Add channel offset.
9695 if ( firstChannel > 0 ) {
9696 if ( stream_.deviceInterleaved[mode] ) {
9697 if ( mode == OUTPUT ) {
9698 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9699 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9702 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9703 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9707 if ( mode == OUTPUT ) {
9708 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9709 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9712 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9713 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9719 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9721 // This function does format conversion, input/output channel compensation, and
9722 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9723 // the lower three bytes of a 32-bit integer.
9725 // Clear our device buffer when in/out duplex device channels are different
9726 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9727 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9728 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9731 if (info.outFormat == RTAUDIO_FLOAT64) {
9733 Float64 *out = (Float64 *)outBuffer;
9735 if (info.inFormat == RTAUDIO_SINT8) {
9736 signed char *in = (signed char *)inBuffer;
9737 scale = 1.0 / 127.5;
9738 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9739 for (j=0; j<info.channels; j++) {
9740 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9741 out[info.outOffset[j]] += 0.5;
9742 out[info.outOffset[j]] *= scale;
9745 out += info.outJump;
9748 else if (info.inFormat == RTAUDIO_SINT16) {
9749 Int16 *in = (Int16 *)inBuffer;
9750 scale = 1.0 / 32767.5;
9751 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9752 for (j=0; j<info.channels; j++) {
9753 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9754 out[info.outOffset[j]] += 0.5;
9755 out[info.outOffset[j]] *= scale;
9758 out += info.outJump;
9761 else if (info.inFormat == RTAUDIO_SINT24) {
9762 Int24 *in = (Int24 *)inBuffer;
9763 scale = 1.0 / 8388607.5;
9764 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9765 for (j=0; j<info.channels; j++) {
9766 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9767 out[info.outOffset[j]] += 0.5;
9768 out[info.outOffset[j]] *= scale;
9771 out += info.outJump;
9774 else if (info.inFormat == RTAUDIO_SINT32) {
9775 Int32 *in = (Int32 *)inBuffer;
9776 scale = 1.0 / 2147483647.5;
9777 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9778 for (j=0; j<info.channels; j++) {
9779 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9780 out[info.outOffset[j]] += 0.5;
9781 out[info.outOffset[j]] *= scale;
9784 out += info.outJump;
9787 else if (info.inFormat == RTAUDIO_FLOAT32) {
9788 Float32 *in = (Float32 *)inBuffer;
9789 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9790 for (j=0; j<info.channels; j++) {
9791 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9794 out += info.outJump;
9797 else if (info.inFormat == RTAUDIO_FLOAT64) {
9798 // Channel compensation and/or (de)interleaving only.
9799 Float64 *in = (Float64 *)inBuffer;
9800 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9801 for (j=0; j<info.channels; j++) {
9802 out[info.outOffset[j]] = in[info.inOffset[j]];
9805 out += info.outJump;
9809 else if (info.outFormat == RTAUDIO_FLOAT32) {
9811 Float32 *out = (Float32 *)outBuffer;
9813 if (info.inFormat == RTAUDIO_SINT8) {
9814 signed char *in = (signed char *)inBuffer;
9815 scale = (Float32) ( 1.0 / 127.5 );
9816 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9817 for (j=0; j<info.channels; j++) {
9818 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9819 out[info.outOffset[j]] += 0.5;
9820 out[info.outOffset[j]] *= scale;
9823 out += info.outJump;
9826 else if (info.inFormat == RTAUDIO_SINT16) {
9827 Int16 *in = (Int16 *)inBuffer;
9828 scale = (Float32) ( 1.0 / 32767.5 );
9829 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9830 for (j=0; j<info.channels; j++) {
9831 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9832 out[info.outOffset[j]] += 0.5;
9833 out[info.outOffset[j]] *= scale;
9836 out += info.outJump;
9839 else if (info.inFormat == RTAUDIO_SINT24) {
9840 Int24 *in = (Int24 *)inBuffer;
9841 scale = (Float32) ( 1.0 / 8388607.5 );
9842 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9843 for (j=0; j<info.channels; j++) {
9844 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9845 out[info.outOffset[j]] += 0.5;
9846 out[info.outOffset[j]] *= scale;
9849 out += info.outJump;
9852 else if (info.inFormat == RTAUDIO_SINT32) {
9853 Int32 *in = (Int32 *)inBuffer;
9854 scale = (Float32) ( 1.0 / 2147483647.5 );
9855 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9856 for (j=0; j<info.channels; j++) {
9857 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9858 out[info.outOffset[j]] += 0.5;
9859 out[info.outOffset[j]] *= scale;
9862 out += info.outJump;
9865 else if (info.inFormat == RTAUDIO_FLOAT32) {
9866 // Channel compensation and/or (de)interleaving only.
9867 Float32 *in = (Float32 *)inBuffer;
9868 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9869 for (j=0; j<info.channels; j++) {
9870 out[info.outOffset[j]] = in[info.inOffset[j]];
9873 out += info.outJump;
9876 else if (info.inFormat == RTAUDIO_FLOAT64) {
9877 Float64 *in = (Float64 *)inBuffer;
9878 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9879 for (j=0; j<info.channels; j++) {
9880 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9883 out += info.outJump;
9887 else if (info.outFormat == RTAUDIO_SINT32) {
9888 Int32 *out = (Int32 *)outBuffer;
9889 if (info.inFormat == RTAUDIO_SINT8) {
9890 signed char *in = (signed char *)inBuffer;
9891 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9892 for (j=0; j<info.channels; j++) {
9893 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9894 out[info.outOffset[j]] <<= 24;
9897 out += info.outJump;
9900 else if (info.inFormat == RTAUDIO_SINT16) {
9901 Int16 *in = (Int16 *)inBuffer;
9902 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9903 for (j=0; j<info.channels; j++) {
9904 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9905 out[info.outOffset[j]] <<= 16;
9908 out += info.outJump;
9911 else if (info.inFormat == RTAUDIO_SINT24) {
9912 Int24 *in = (Int24 *)inBuffer;
9913 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9914 for (j=0; j<info.channels; j++) {
9915 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9916 out[info.outOffset[j]] <<= 8;
9919 out += info.outJump;
9922 else if (info.inFormat == RTAUDIO_SINT32) {
9923 // Channel compensation and/or (de)interleaving only.
9924 Int32 *in = (Int32 *)inBuffer;
9925 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9926 for (j=0; j<info.channels; j++) {
9927 out[info.outOffset[j]] = in[info.inOffset[j]];
9930 out += info.outJump;
9933 else if (info.inFormat == RTAUDIO_FLOAT32) {
9934 Float32 *in = (Float32 *)inBuffer;
9935 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9936 for (j=0; j<info.channels; j++) {
9937 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9940 out += info.outJump;
9943 else if (info.inFormat == RTAUDIO_FLOAT64) {
9944 Float64 *in = (Float64 *)inBuffer;
9945 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9946 for (j=0; j<info.channels; j++) {
9947 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9950 out += info.outJump;
9954 else if (info.outFormat == RTAUDIO_SINT24) {
9955 Int24 *out = (Int24 *)outBuffer;
9956 if (info.inFormat == RTAUDIO_SINT8) {
9957 signed char *in = (signed char *)inBuffer;
9958 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9959 for (j=0; j<info.channels; j++) {
9960 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
9961 //out[info.outOffset[j]] <<= 16;
9964 out += info.outJump;
9967 else if (info.inFormat == RTAUDIO_SINT16) {
9968 Int16 *in = (Int16 *)inBuffer;
9969 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9970 for (j=0; j<info.channels; j++) {
9971 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
9972 //out[info.outOffset[j]] <<= 8;
9975 out += info.outJump;
9978 else if (info.inFormat == RTAUDIO_SINT24) {
9979 // Channel compensation and/or (de)interleaving only.
9980 Int24 *in = (Int24 *)inBuffer;
9981 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9982 for (j=0; j<info.channels; j++) {
9983 out[info.outOffset[j]] = in[info.inOffset[j]];
9986 out += info.outJump;
9989 else if (info.inFormat == RTAUDIO_SINT32) {
9990 Int32 *in = (Int32 *)inBuffer;
9991 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9992 for (j=0; j<info.channels; j++) {
9993 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
9994 //out[info.outOffset[j]] >>= 8;
9997 out += info.outJump;
10000 else if (info.inFormat == RTAUDIO_FLOAT32) {
10001 Float32 *in = (Float32 *)inBuffer;
10002 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10003 for (j=0; j<info.channels; j++) {
10004 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10007 out += info.outJump;
10010 else if (info.inFormat == RTAUDIO_FLOAT64) {
10011 Float64 *in = (Float64 *)inBuffer;
10012 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10013 for (j=0; j<info.channels; j++) {
10014 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10017 out += info.outJump;
10021 else if (info.outFormat == RTAUDIO_SINT16) {
10022 Int16 *out = (Int16 *)outBuffer;
10023 if (info.inFormat == RTAUDIO_SINT8) {
10024 signed char *in = (signed char *)inBuffer;
10025 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10026 for (j=0; j<info.channels; j++) {
10027 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10028 out[info.outOffset[j]] <<= 8;
10031 out += info.outJump;
10034 else if (info.inFormat == RTAUDIO_SINT16) {
10035 // Channel compensation and/or (de)interleaving only.
10036 Int16 *in = (Int16 *)inBuffer;
10037 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10038 for (j=0; j<info.channels; j++) {
10039 out[info.outOffset[j]] = in[info.inOffset[j]];
10042 out += info.outJump;
10045 else if (info.inFormat == RTAUDIO_SINT24) {
10046 Int24 *in = (Int24 *)inBuffer;
10047 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10048 for (j=0; j<info.channels; j++) {
10049 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10052 out += info.outJump;
10055 else if (info.inFormat == RTAUDIO_SINT32) {
10056 Int32 *in = (Int32 *)inBuffer;
10057 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10058 for (j=0; j<info.channels; j++) {
10059 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10062 out += info.outJump;
10065 else if (info.inFormat == RTAUDIO_FLOAT32) {
10066 Float32 *in = (Float32 *)inBuffer;
10067 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10068 for (j=0; j<info.channels; j++) {
10069 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10072 out += info.outJump;
10075 else if (info.inFormat == RTAUDIO_FLOAT64) {
10076 Float64 *in = (Float64 *)inBuffer;
10077 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10078 for (j=0; j<info.channels; j++) {
10079 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10082 out += info.outJump;
10086 else if (info.outFormat == RTAUDIO_SINT8) {
10087 signed char *out = (signed char *)outBuffer;
10088 if (info.inFormat == RTAUDIO_SINT8) {
10089 // Channel compensation and/or (de)interleaving only.
10090 signed char *in = (signed char *)inBuffer;
10091 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10092 for (j=0; j<info.channels; j++) {
10093 out[info.outOffset[j]] = in[info.inOffset[j]];
10096 out += info.outJump;
10099 if (info.inFormat == RTAUDIO_SINT16) {
10100 Int16 *in = (Int16 *)inBuffer;
10101 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10102 for (j=0; j<info.channels; j++) {
10103 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10106 out += info.outJump;
10109 else if (info.inFormat == RTAUDIO_SINT24) {
10110 Int24 *in = (Int24 *)inBuffer;
10111 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10112 for (j=0; j<info.channels; j++) {
10113 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10116 out += info.outJump;
10119 else if (info.inFormat == RTAUDIO_SINT32) {
10120 Int32 *in = (Int32 *)inBuffer;
10121 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10122 for (j=0; j<info.channels; j++) {
10123 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10126 out += info.outJump;
10129 else if (info.inFormat == RTAUDIO_FLOAT32) {
10130 Float32 *in = (Float32 *)inBuffer;
10131 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10132 for (j=0; j<info.channels; j++) {
10133 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10136 out += info.outJump;
10139 else if (info.inFormat == RTAUDIO_FLOAT64) {
10140 Float64 *in = (Float64 *)inBuffer;
10141 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10142 for (j=0; j<info.channels; j++) {
10143 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10146 out += info.outJump;
10152 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10153 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10154 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10156 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10162 if ( format == RTAUDIO_SINT16 ) {
10163 for ( unsigned int i=0; i<samples; i++ ) {
10164 // Swap 1st and 2nd bytes.
10169 // Increment 2 bytes.
10173 else if ( format == RTAUDIO_SINT32 ||
10174 format == RTAUDIO_FLOAT32 ) {
10175 for ( unsigned int i=0; i<samples; i++ ) {
10176 // Swap 1st and 4th bytes.
10181 // Swap 2nd and 3rd bytes.
10187 // Increment 3 more bytes.
10191 else if ( format == RTAUDIO_SINT24 ) {
10192 for ( unsigned int i=0; i<samples; i++ ) {
10193 // Swap 1st and 3rd bytes.
10198 // Increment 2 more bytes.
10202 else if ( format == RTAUDIO_FLOAT64 ) {
10203 for ( unsigned int i=0; i<samples; i++ ) {
10204 // Swap 1st and 8th bytes
10209 // Swap 2nd and 7th bytes
10215 // Swap 3rd and 6th bytes
10221 // Swap 4th and 5th bytes
10227 // Increment 5 more bytes.
10233 // Indentation settings for Vim and Emacs
10235 // Local Variables:
10236 // c-basic-offset: 2
10237 // indent-tabs-mode: nil
10240 // vim: et sts=2 sw=2