1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
58 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
// Windows builds: the generic MUTEX_* macros map onto Win32 critical sections.
59   #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
60   #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
61   #define MUTEX_LOCK(A)       EnterCriticalSection(A)
62   #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
// Convert a narrow C string from the OS API into a std::string.
// A NULL pointer yields an empty string instead of undefined behavior
// (std::string(NULL) is UB).
static std::string convertCharPointerToStdString(const char *text)
{
  return text ? std::string(text) : std::string();
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
79 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// POSIX builds: the generic MUTEX_* macros map onto pthread mutexes.
81   #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
82   #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
83   #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
84   #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
// NOTE(review): an '#else' line separating the pthread branch from the
// dummy no-op definitions below appears to be missing from this listing
// (original line 85) -- confirm against upstream RtAudio.
86   #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
87   #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_ALSA__)
111 apis.push_back( LINUX_ALSA );
113 #if defined(__LINUX_PULSE__)
114 apis.push_back( LINUX_PULSE );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
136 void RtAudio :: openRtApi( RtAudio::Api api )
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
180 RtAudio :: RtAudio( RtAudio::Api api )
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
187 if ( rtapi_ ) return;
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
203 if ( rtapi_ ) return;
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll thow an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
213 RtAudio :: ~RtAudio()
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
253 MUTEX_DESTROY( &stream_.mutex );
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
257 RtAudio::StreamParameters *iParams,
258 RtAudioFormat format, unsigned int sampleRate,
259 unsigned int *bufferFrames,
260 RtAudioCallback callback, void *userData,
261 RtAudio::StreamOptions *options,
262 RtAudioErrorCallback errorCallback )
264 if ( stream_.state != STREAM_CLOSED ) {
265 errorText_ = "RtApi::openStream: a stream is already open!";
266 error( RtAudioError::INVALID_USE );
270 // Clear stream information potentially left from a previously open stream.
273 if ( oParams && oParams->nChannels < 1 ) {
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
275 error( RtAudioError::INVALID_USE );
279 if ( iParams && iParams->nChannels < 1 ) {
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
281 error( RtAudioError::INVALID_USE );
285 if ( oParams == NULL && iParams == NULL ) {
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
287 error( RtAudioError::INVALID_USE );
291 if ( formatBytes(format) == 0 ) {
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
293 error( RtAudioError::INVALID_USE );
297 unsigned int nDevices = getDeviceCount();
298 unsigned int oChannels = 0;
300 oChannels = oParams->nChannels;
301 if ( oParams->deviceId >= nDevices ) {
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
303 error( RtAudioError::INVALID_USE );
308 unsigned int iChannels = 0;
310 iChannels = iParams->nChannels;
311 if ( iParams->deviceId >= nDevices ) {
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
313 error( RtAudioError::INVALID_USE );
320 if ( oChannels > 0 ) {
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
323 sampleRate, format, bufferFrames, options );
324 if ( result == false ) {
325 error( RtAudioError::SYSTEM_ERROR );
330 if ( iChannels > 0 ) {
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
333 sampleRate, format, bufferFrames, options );
334 if ( result == false ) {
335 if ( oChannels > 0 ) closeStream();
336 error( RtAudioError::SYSTEM_ERROR );
341 stream_.callbackInfo.callback = (void *) callback;
342 stream_.callbackInfo.userData = userData;
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
346 stream_.state = STREAM_STOPPED;
349 unsigned int RtApi :: getDefaultInputDevice( void )
351 // Should be implemented in subclasses if possible.
355 unsigned int RtApi :: getDefaultOutputDevice( void )
357 // Should be implemented in subclasses if possible.
361 void RtApi :: closeStream( void )
363 // MUST be implemented in subclasses!
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
370 RtAudio::StreamOptions * /*options*/ )
372 // MUST be implemented in subclasses!
376 void RtApi :: tickStreamTime( void )
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
389 long RtApi :: getStreamLatency( void )
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
402 double RtApi :: getStreamTime( void )
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
412 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
413 return stream_.streamTime;
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
421 return stream_.streamTime;
425 void RtApi :: setStreamTime( double time )
430 stream_.streamTime = time;
431 #if defined( HAVE_GETTIMEOFDAY )
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
436 unsigned int RtApi :: getStreamSampleRate( void )
440 return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
467 // A structure to hold various information related to the CoreAudio API
470 AudioDeviceID id[2]; // device ids
471 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
472 AudioDeviceIOProcID procId[2];
474 UInt32 iStream[2]; // device stream index (or first if using multiple)
475 UInt32 nStreams[2]; // number of streams to use
478 pthread_cond_t condition;
479 int drainCounter; // Tracks callback counts when draining
480 bool internalDrain; // Indicates if stop is initiated from callback or not.
483 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
486 RtApiCore:: RtApiCore()
488 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
489 // This is a largely undocumented but absolutely necessary
490 // requirement starting with OS-X 10.6. If not called, queries and
491 // updates to various audio device properties are not handled
493 CFRunLoopRef theRunLoop = NULL;
494 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
495 kAudioObjectPropertyScopeGlobal,
496 kAudioObjectPropertyElementMaster };
497 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
498 if ( result != noErr ) {
499 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
500 error( RtAudioError::WARNING );
505 RtApiCore :: ~RtApiCore()
507 // The subclass destructor gets called before the base class
508 // destructor, so close an existing stream before deallocating
509 // apiDeviceId memory.
510 if ( stream_.state != STREAM_CLOSED ) closeStream();
513 unsigned int RtApiCore :: getDeviceCount( void )
515 // Find out how many audio devices there are, if any.
517 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
518 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
519 if ( result != noErr ) {
520 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
521 error( RtAudioError::WARNING );
525 return dataSize / sizeof( AudioDeviceID );
528 unsigned int RtApiCore :: getDefaultInputDevice( void )
530 unsigned int nDevices = getDeviceCount();
531 if ( nDevices <= 1 ) return 0;
534 UInt32 dataSize = sizeof( AudioDeviceID );
535 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
536 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
537 if ( result != noErr ) {
538 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
539 error( RtAudioError::WARNING );
543 dataSize *= nDevices;
544 AudioDeviceID deviceList[ nDevices ];
545 property.mSelector = kAudioHardwarePropertyDevices;
546 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
547 if ( result != noErr ) {
548 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
549 error( RtAudioError::WARNING );
553 for ( unsigned int i=0; i<nDevices; i++ )
554 if ( id == deviceList[i] ) return i;
556 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
557 error( RtAudioError::WARNING );
561 unsigned int RtApiCore :: getDefaultOutputDevice( void )
563 unsigned int nDevices = getDeviceCount();
564 if ( nDevices <= 1 ) return 0;
567 UInt32 dataSize = sizeof( AudioDeviceID );
568 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
569 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
570 if ( result != noErr ) {
571 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
572 error( RtAudioError::WARNING );
576 dataSize = sizeof( AudioDeviceID ) * nDevices;
577 AudioDeviceID deviceList[ nDevices ];
578 property.mSelector = kAudioHardwarePropertyDevices;
579 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
580 if ( result != noErr ) {
581 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
582 error( RtAudioError::WARNING );
586 for ( unsigned int i=0; i<nDevices; i++ )
587 if ( id == deviceList[i] ) return i;
589 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
590 error( RtAudioError::WARNING );
594 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
// Probe one CoreAudio device: name, channel counts, supported sample
// rates and native format.  Errors are reported through error() with
// WARNING severity.
596 RtAudio::DeviceInfo info;
// Validate the requested device index against the current device count.
600 unsigned int nDevices = getDeviceCount();
601 if ( nDevices == 0 ) {
602 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
603 error( RtAudioError::INVALID_USE );
607 if ( device >= nDevices ) {
608 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
609 error( RtAudioError::INVALID_USE );
// Fetch the full device id list and pick the requested entry.
613 AudioDeviceID deviceList[ nDevices ];
614 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
615 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
616 kAudioObjectPropertyScopeGlobal,
617 kAudioObjectPropertyElementMaster };
618 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
619 0, NULL, &dataSize, (void *) &deviceList );
620 if ( result != noErr ) {
621 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
622 error( RtAudioError::WARNING );
626 AudioDeviceID id = deviceList[ device ];
628 // Get the device name.
// The reported name is "<manufacturer>: <device name>", both converted
// from CFStringRef into a heap buffer sized for worst-case UTF-8
// expansion (3 bytes per UTF-16 unit, plus the terminator).
631 dataSize = sizeof( CFStringRef );
632 property.mSelector = kAudioObjectPropertyManufacturer;
633 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
634 if ( result != noErr ) {
635 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
636 errorText_ = errorStream_.str();
637 error( RtAudioError::WARNING );
641 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
642 int length = CFStringGetLength(cfname);
643 char *mname = (char *)malloc(length * 3 + 1);
644 #if defined( UNICODE ) || defined( _UNICODE )
645 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
647 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
649 info.name.append( (const char *)mname, strlen(mname) );
650 info.name.append( ": " );
654 property.mSelector = kAudioObjectPropertyName;
655 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
656 if ( result != noErr ) {
657 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
658 errorText_ = errorStream_.str();
659 error( RtAudioError::WARNING );
663 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
664 length = CFStringGetLength(cfname);
665 char *name = (char *)malloc(length * 3 + 1);
666 #if defined( UNICODE ) || defined( _UNICODE )
667 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
669 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
671 info.name.append( (const char *)name, strlen(name) );
675 // Get the output stream "configuration".
// Two-step CoreAudio query: first the size of the AudioBufferList,
// then the list itself into a malloc'd buffer.
676 AudioBufferList *bufferList = nil;
677 property.mSelector = kAudioDevicePropertyStreamConfiguration;
678 property.mScope = kAudioDevicePropertyScopeOutput;
679 // property.mElement = kAudioObjectPropertyElementWildcard;
681 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
682 if ( result != noErr || dataSize == 0 ) {
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
684 errorText_ = errorStream_.str();
685 error( RtAudioError::WARNING );
689 // Allocate the AudioBufferList.
690 bufferList = (AudioBufferList *) malloc( dataSize );
691 if ( bufferList == NULL ) {
692 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
693 error( RtAudioError::WARNING );
697 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
698 if ( result != noErr || dataSize == 0 ) {
700 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
701 errorText_ = errorStream_.str();
702 error( RtAudioError::WARNING );
706 // Get output channel information.
// Output channel count = sum of channels over all output streams.
707 unsigned int i, nStreams = bufferList->mNumberBuffers;
708 for ( i=0; i<nStreams; i++ )
709 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
712 // Get the input stream "configuration".
// Same two-step query, now with input scope.
713 property.mScope = kAudioDevicePropertyScopeInput;
714 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
715 if ( result != noErr || dataSize == 0 ) {
716 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
717 errorText_ = errorStream_.str();
718 error( RtAudioError::WARNING );
722 // Allocate the AudioBufferList.
723 bufferList = (AudioBufferList *) malloc( dataSize );
724 if ( bufferList == NULL ) {
725 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
726 error( RtAudioError::WARNING );
730 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
731 if (result != noErr || dataSize == 0) {
733 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
734 errorText_ = errorStream_.str();
735 error( RtAudioError::WARNING );
739 // Get input channel information.
740 nStreams = bufferList->mNumberBuffers;
741 for ( i=0; i<nStreams; i++ )
742 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
745 // If device opens for both playback and capture, we determine the channels.
746 if ( info.outputChannels > 0 && info.inputChannels > 0 )
747 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
749 // Probe the device sample rates.
750 bool isInput = false;
751 if ( info.outputChannels == 0 ) isInput = true;
753 // Determine the supported sample rates.
754 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
755 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
756 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
757 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
758 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
759 errorText_ = errorStream_.str();
760 error( RtAudioError::WARNING );
764 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
765 AudioValueRange rangeList[ nRanges ];
766 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
767 if ( result != kAudioHardwareNoError ) {
768 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
769 errorText_ = errorStream_.str();
770 error( RtAudioError::WARNING );
774 // The sample rate reporting mechanism is a bit of a mystery. It
775 // seems that it can either return individual rates or a range of
776 // rates. I assume that if the min / max range values are the same,
777 // then that represents a single supported rate and if the min / max
778 // range values are different, the device supports an arbitrary
779 // range of values (though there might be multiple ranges, so we'll
780 // use the most conservative range).
781 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
782 bool haveValueRange = false;
783 info.sampleRates.clear();
784 for ( UInt32 i=0; i<nRanges; i++ ) {
785 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
786 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
787 info.sampleRates.push_back( tmpSr );
// Preferred rate: the largest discrete rate not exceeding 48 kHz.
789 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
790 info.preferredSampleRate = tmpSr;
793 haveValueRange = true;
794 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
795 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For a continuous range, advertise the standard rates that fall inside it.
799 if ( haveValueRange ) {
800 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
801 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
802 info.sampleRates.push_back( SAMPLE_RATES[k] );
804 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
805 info.preferredSampleRate = SAMPLE_RATES[k];
810 // Sort and remove any redundant values
811 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
812 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
814 if ( info.sampleRates.size() == 0 ) {
815 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 // CoreAudio always uses 32-bit floating point data for PCM streams.
822 // Thus, any other "physical" formats supported by the device are of
823 // no interest to the client.
824 info.nativeFormats = RTAUDIO_FLOAT32;
// Flag the system default input/output devices.
826 if ( info.outputChannels > 0 )
827 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
828 if ( info.inputChannels > 0 )
829 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
835 static OSStatus callbackHandler( AudioDeviceID inDevice,
836 const AudioTimeStamp* /*inNow*/,
837 const AudioBufferList* inInputData,
838 const AudioTimeStamp* /*inInputTime*/,
839 AudioBufferList* outOutputData,
840 const AudioTimeStamp* /*inOutputTime*/,
843 CallbackInfo *info = (CallbackInfo *) infoPointer;
845 RtApiCore *object = (RtApiCore *) info->object;
846 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
847 return kAudioHardwareUnspecifiedError;
849 return kAudioHardwareNoError;
852 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
854 const AudioObjectPropertyAddress properties[],
855 void* handlePointer )
857 CoreHandle *handle = (CoreHandle *) handlePointer;
858 for ( UInt32 i=0; i<nAddresses; i++ ) {
859 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
860 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
861 handle->xrun[1] = true;
863 handle->xrun[0] = true;
867 return kAudioHardwareNoError;
870 static OSStatus rateListener( AudioObjectID inDevice,
871 UInt32 /*nAddresses*/,
872 const AudioObjectPropertyAddress /*properties*/[],
875 Float64 *rate = (Float64 *) ratePointer;
876 UInt32 dataSize = sizeof( Float64 );
877 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
878 kAudioObjectPropertyScopeGlobal,
879 kAudioObjectPropertyElementMaster };
880 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
881 return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
// Close the currently open stream: remove the processor-overload (xrun)
// property listeners, stop each device if the stream is still running,
// detach the IOProcs, free the user and device buffers and the CoreHandle,
// and mark the stream CLOSED. Calling with no open stream only emits a
// WARNING. NOTE(review): several brace/return lines are elided in this
// extraction; numbering in-line is residual from the original file.
1403 void RtApiCore :: closeStream( void )
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side (handle index 0): remove xrun listener, stop the device,
// then tear down the IOProc (10.5+ API or the deprecated pre-10.5 one).
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side (handle index 1): torn down separately only when it is a
// distinct device from the output (INPUT mode, or DUPLEX across devices).
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-mode user buffers and the shared device buffer
// (both malloc/calloc'd in probeDeviceOpen).
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
// Start the stream: call AudioDeviceStart() on the output device and,
// when the input is a separate device, on the input device as well.
// Resets the drain bookkeeping and sets the state to RUNNING. On any
// CoreAudio failure, falls through to error(SYSTEM_ERROR); starting an
// already-running stream is only a WARNING.
1479 void RtApiCore :: startStream( void )
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output (or duplex master) device: handle->id[0].
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
// Input device started separately only when distinct from the output.
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
// Reset drain state so callbackEvent() starts from a clean slate.
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, letting any queued output drain first: if no drain is
// in progress, set drainCounter = 2 and block on the condition variable
// until callbackEvent() signals that the drain completed, then call
// AudioDeviceStop() on each device (input separately only when it is a
// distinct device). Stopping an already-stopped stream is only a WARNING.
1520 void RtApiCore :: stopStream( void )
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means this is an external stop request: start a
// drain and wait for the callback to signal completion.
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without draining pending output: setting
// drainCounter = 2 up-front makes callbackEvent() write zeros instead of
// user data. NOTE(review): the remainder of the body (presumably the
// stopStream() call) is elided in this extraction — confirm against the
// full file. Aborting an already-stopped stream is only a WARNING.
1563 void RtApiCore :: abortStream( void )
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
// Thread entry point spawned from callbackEvent() when the user callback
// requested an (internal-drain) stop: recovers the RtApiCore object from
// the CallbackInfo and calls stopStream() outside the audio callback.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
1589 pthread_exit( NULL );
// Per-buffer CoreAudio IOProc handler. Invokes the user callback for
// fresh output data, then copies/converts between the user buffers and
// the CoreAudio AudioBufferList(s) for output and input, handling the
// single-stream, mono (non-interleaved multi-stream), and multi-channel
// multi-stream layouts. Also drives the stop/drain state machine via
// handle->drainCounter. Returns SUCCESS immediately when the stream is
// stopped/stopping. NOTE(review): several brace/blank lines are elided
// in this extraction; in-line numbers are residual from the original.
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
// internalDrain (callback asked to stop) spawns coreStopStream on a new
// thread; an external stopStream() is blocked on the condition variable
// and is woken here instead.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
// Report and clear any over/underflow flagged by xrunListener.
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort (no drain); 1 = stop after draining output.
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
// ---- OUTPUT side: move user data into the CoreAudio buffer(s). ----
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
// inOffset: per-channel stride in the source (1 if interleaved,
// bufferSize if the channels are stored as planar blocks).
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
// ---- INPUT side: move CoreAudio data into the user buffer(s). ----
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
// outOffset: per-channel stride in the destination (1 if interleaved,
// bufferSize if planar). Mirrors the output-side logic above.
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
1848 RtApi::tickStreamTime();
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
// JACK is a low-latency audio server, originally written for the
// GNU/Linux operating system and now also ported to OS-X.  It can
// connect a number of different applications to an audio device, as
// well as allowing them to share audio between themselves.
//
// When using JACK with RtAudio, "devices" refer to JACK clients that
// have ports connected to the server.  The JACK server is typically
// started in a terminal as follows:
//
// .jackd -d alsa -d hw:0
//
// or through an interface program such as qjackctl.  Many of the
// parameters normally set for a stream are fixed by the JACK server
// and can be specified when the JACK server is started.  In
// particular,
//
// .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
//
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
// frames, and number of buffers = 4.  Once the server is running, it
// is not possible to override these values.  If the values are not
// specified in the command-line, the JACK server uses default values.
//
// The JACK server does not have to be running when an instance of
// RtApiJack is created, though the function getDeviceCount() will
// report 0 devices found until JACK has been started.  When no
// devices are available (i.e., the JACK server is not running), a
// stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
1935 jack_client_t *client;
1936 jack_port_t **ports[2];
1937 std::string deviceName[2];
1939 pthread_cond_t condition;
1940 int drainCounter; // Tracks callback counts when draining
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed via jack_set_error_function() in the
// RtApiJack constructor to suppress JACK's console error output in
// non-debug builds.
static void jackSilentError( const char * ) {}
#endif
1951 RtApiJack :: RtApiJack()
1952 :shouldAutoconnect_(true) {
1953 // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955 // Turn off Jack's internal error reporting.
1956 jack_set_error_function( &jackSilentError );
1960 RtApiJack :: ~RtApiJack()
1962 if ( stream_.state != STREAM_CLOSED ) closeStream();
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967 // See if we can become a jack client.
1968 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969 jack_status_t *status = NULL;
1970 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971 if ( client == 0 ) return 0;
1974 std::string port, previousPort;
1975 unsigned int nChannels = 0, nDevices = 0;
1976 ports = jack_get_ports( client, NULL, NULL, 0 );
1978 // Parse the port names up to the first colon (:).
1981 port = (char *) ports[ nChannels ];
1982 iColon = port.find(":");
1983 if ( iColon != std::string::npos ) {
1984 port = port.substr( 0, iColon + 1 );
1985 if ( port != previousPort ) {
1987 previousPort = port;
1990 } while ( ports[++nChannels] );
1994 jack_client_close( client );
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000 RtAudio::DeviceInfo info;
2001 info.probed = false;
2003 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004 jack_status_t *status = NULL;
2005 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006 if ( client == 0 ) {
2007 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008 error( RtAudioError::WARNING );
2013 std::string port, previousPort;
2014 unsigned int nPorts = 0, nDevices = 0;
2015 ports = jack_get_ports( client, NULL, NULL, 0 );
2017 // Parse the port names up to the first colon (:).
2020 port = (char *) ports[ nPorts ];
2021 iColon = port.find(":");
2022 if ( iColon != std::string::npos ) {
2023 port = port.substr( 0, iColon );
2024 if ( port != previousPort ) {
2025 if ( nDevices == device ) info.name = port;
2027 previousPort = port;
2030 } while ( ports[++nPorts] );
2034 if ( device >= nDevices ) {
2035 jack_client_close( client );
2036 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037 error( RtAudioError::INVALID_USE );
2041 // Get the current jack server sample rate.
2042 info.sampleRates.clear();
2044 info.preferredSampleRate = jack_get_sample_rate( client );
2045 info.sampleRates.push_back( info.preferredSampleRate );
2047 // Count the available ports containing the client name as device
2048 // channels. Jack "input ports" equal RtAudio output channels.
2049 unsigned int nChannels = 0;
2050 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2052 while ( ports[ nChannels ] ) nChannels++;
2054 info.outputChannels = nChannels;
2057 // Jack "output ports" equal RtAudio input channels.
2059 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2061 while ( ports[ nChannels ] ) nChannels++;
2063 info.inputChannels = nChannels;
2066 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067 jack_client_close(client);
2068 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069 error( RtAudioError::WARNING );
2073 // If device opens for both playback and capture, we determine the channels.
2074 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077 // Jack always uses 32-bit floats.
2078 info.nativeFormats = RTAUDIO_FLOAT32;
2080 // Jack doesn't provide default devices so we'll use the first available one.
2081 if ( device == 0 && info.outputChannels > 0 )
2082 info.isDefaultOutput = true;
2083 if ( device == 0 && info.inputChannels > 0 )
2084 info.isDefaultInput = true;
2086 jack_client_close(client);
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093 CallbackInfo *info = (CallbackInfo *) infoPointer;
2095 RtApiJack *object = (RtApiJack *) info->object;
2096 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
2105 static void *jackCloseStream( void *ptr )
2107 CallbackInfo *info = (CallbackInfo *) ptr;
2108 RtApiJack *object = (RtApiJack *) info->object;
2110 object->closeStream();
2112 pthread_exit( NULL );
2114 static void jackShutdown( void *infoPointer )
2116 CallbackInfo *info = (CallbackInfo *) infoPointer;
2117 RtApiJack *object = (RtApiJack *) info->object;
2119 // Check current stream state. If stopped, then we'll assume this
2120 // was called as a result of a call to RtApiJack::stopStream (the
2121 // deactivation of a client handle causes this function to be called).
2122 // If not, we'll assume the Jack server is shutting down or some
2123 // other problem occurred and we should close the stream.
2124 if ( object->isStreamRunning() == false ) return;
2126 ThreadHandle threadId;
2127 pthread_create( &threadId, NULL, jackCloseStream, info );
2128 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2131 static int jackXrun( void *infoPointer )
2133 JackHandle *handle = (JackHandle *) infoPointer;
2135 if ( handle->ports[0] ) handle->xrun[0] = true;
2136 if ( handle->ports[1] ) handle->xrun[1] = true;
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142 unsigned int firstChannel, unsigned int sampleRate,
2143 RtAudioFormat format, unsigned int *bufferSize,
2144 RtAudio::StreamOptions *options )
2146 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148 // Look for jack server and try to become a client (only do once per stream).
2149 jack_client_t *client = 0;
2150 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152 jack_status_t *status = NULL;
2153 if ( options && !options->streamName.empty() )
2154 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156 client = jack_client_open( "RtApiJack", jackoptions, status );
2157 if ( client == 0 ) {
2158 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159 error( RtAudioError::WARNING );
2164 // The handle must have been created on an earlier pass.
2165 client = handle->client;
2169 std::string port, previousPort, deviceName;
2170 unsigned int nPorts = 0, nDevices = 0;
2171 ports = jack_get_ports( client, NULL, NULL, 0 );
2173 // Parse the port names up to the first colon (:).
2176 port = (char *) ports[ nPorts ];
2177 iColon = port.find(":");
2178 if ( iColon != std::string::npos ) {
2179 port = port.substr( 0, iColon );
2180 if ( port != previousPort ) {
2181 if ( nDevices == device ) deviceName = port;
2183 previousPort = port;
2186 } while ( ports[++nPorts] );
2190 if ( device >= nDevices ) {
2191 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2195 // Count the available ports containing the client name as device
2196 // channels. Jack "input ports" equal RtAudio output channels.
2197 unsigned int nChannels = 0;
2198 unsigned long flag = JackPortIsInput;
2199 if ( mode == INPUT ) flag = JackPortIsOutput;
2200 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2202 while ( ports[ nChannels ] ) nChannels++;
2206 // Compare the jack ports for specified client to the requested number of channels.
2207 if ( nChannels < (channels + firstChannel) ) {
2208 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2209 errorText_ = errorStream_.str();
2213 // Check the jack server sample rate.
2214 unsigned int jackRate = jack_get_sample_rate( client );
2215 if ( sampleRate != jackRate ) {
2216 jack_client_close( client );
2217 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2218 errorText_ = errorStream_.str();
2221 stream_.sampleRate = jackRate;
2223 // Get the latency of the JACK port.
2224 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2225 if ( ports[ firstChannel ] ) {
2227 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2228 // the range (usually the min and max are equal)
2229 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2230 // get the latency range
2231 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2232 // be optimistic, use the min!
2233 stream_.latency[mode] = latrange.min;
2234 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2238 // The jack server always uses 32-bit floating-point data.
2239 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2240 stream_.userFormat = format;
2242 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2243 else stream_.userInterleaved = true;
2245 // Jack always uses non-interleaved buffers.
2246 stream_.deviceInterleaved[mode] = false;
2248 // Jack always provides host byte-ordered data.
2249 stream_.doByteSwap[mode] = false;
2251 // Get the buffer size. The buffer size and number of buffers
2252 // (periods) is set when the jack server is started.
2253 stream_.bufferSize = (int) jack_get_buffer_size( client );
2254 *bufferSize = stream_.bufferSize;
2256 stream_.nDeviceChannels[mode] = channels;
2257 stream_.nUserChannels[mode] = channels;
2259 // Set flags for buffer conversion.
2260 stream_.doConvertBuffer[mode] = false;
2261 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2262 stream_.doConvertBuffer[mode] = true;
2263 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2264 stream_.nUserChannels[mode] > 1 )
2265 stream_.doConvertBuffer[mode] = true;
2267 // Allocate our JackHandle structure for the stream.
2268 if ( handle == 0 ) {
2270 handle = new JackHandle;
2272 catch ( std::bad_alloc& ) {
2273 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2277 if ( pthread_cond_init(&handle->condition, NULL) ) {
2278 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2281 stream_.apiHandle = (void *) handle;
2282 handle->client = client;
2284 handle->deviceName[mode] = deviceName;
2286 // Allocate necessary internal buffers.
2287 unsigned long bufferBytes;
2288 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2289 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2290 if ( stream_.userBuffer[mode] == NULL ) {
2291 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2295 if ( stream_.doConvertBuffer[mode] ) {
2297 bool makeBuffer = true;
2298 if ( mode == OUTPUT )
2299 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2300 else { // mode == INPUT
2301 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2302 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2303 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2304 if ( bufferBytes < bytesOut ) makeBuffer = false;
2309 bufferBytes *= *bufferSize;
2310 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2311 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2312 if ( stream_.deviceBuffer == NULL ) {
2313 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2319 // Allocate memory for the Jack ports (channels) identifiers.
2320 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2321 if ( handle->ports[mode] == NULL ) {
2322 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2326 stream_.device[mode] = device;
2327 stream_.channelOffset[mode] = firstChannel;
2328 stream_.state = STREAM_STOPPED;
2329 stream_.callbackInfo.object = (void *) this;
2331 if ( stream_.mode == OUTPUT && mode == INPUT )
2332 // We had already set up the stream for output.
2333 stream_.mode = DUPLEX;
2335 stream_.mode = mode;
2336 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2337 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
2338 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2341 // Register our ports.
2343 if ( mode == OUTPUT ) {
2344 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2345 snprintf( label, 64, "outport %d", i );
2346 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2347 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2351 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2352 snprintf( label, 64, "inport %d", i );
2353 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2354 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2358 // Setup the buffer conversion information structure. We don't use
2359 // buffers to do channel offsets, so we override that parameter
2361 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2363 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2369 pthread_cond_destroy( &handle->condition );
2370 jack_client_close( handle->client );
2372 if ( handle->ports[0] ) free( handle->ports[0] );
2373 if ( handle->ports[1] ) free( handle->ports[1] );
2376 stream_.apiHandle = 0;
2379 for ( int i=0; i<2; i++ ) {
2380 if ( stream_.userBuffer[i] ) {
2381 free( stream_.userBuffer[i] );
2382 stream_.userBuffer[i] = 0;
2386 if ( stream_.deviceBuffer ) {
2387 free( stream_.deviceBuffer );
2388 stream_.deviceBuffer = 0;
2394 void RtApiJack :: closeStream( void )
2396 if ( stream_.state == STREAM_CLOSED ) {
2397 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2398 error( RtAudioError::WARNING );
2402 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2405 if ( stream_.state == STREAM_RUNNING )
2406 jack_deactivate( handle->client );
2408 jack_client_close( handle->client );
2412 if ( handle->ports[0] ) free( handle->ports[0] );
2413 if ( handle->ports[1] ) free( handle->ports[1] );
2414 pthread_cond_destroy( &handle->condition );
2416 stream_.apiHandle = 0;
2419 for ( int i=0; i<2; i++ ) {
2420 if ( stream_.userBuffer[i] ) {
2421 free( stream_.userBuffer[i] );
2422 stream_.userBuffer[i] = 0;
2426 if ( stream_.deviceBuffer ) {
2427 free( stream_.deviceBuffer );
2428 stream_.deviceBuffer = 0;
2431 stream_.mode = UNINITIALIZED;
2432 stream_.state = STREAM_CLOSED;
2435 void RtApiJack :: startStream( void )
2438 if ( stream_.state == STREAM_RUNNING ) {
2439 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2440 error( RtAudioError::WARNING );
2444 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2445 int result = jack_activate( handle->client );
2447 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2453 // Get the list of available ports.
2454 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2456 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2457 if ( ports == NULL) {
2458 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2462 // Now make the port connections. Since RtAudio wasn't designed to
2463 // allow the user to select particular channels of a device, we'll
2464 // just open the first "nChannels" ports with offset.
2465 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2467 if ( ports[ stream_.channelOffset[0] + i ] )
2468 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2471 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2478 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2480 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2481 if ( ports == NULL) {
2482 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2486 // Now make the port connections. See note above.
2487 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2489 if ( ports[ stream_.channelOffset[1] + i ] )
2490 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2493 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2500 handle->drainCounter = 0;
2501 handle->internalDrain = false;
2502 stream_.state = STREAM_RUNNING;
2505 if ( result == 0 ) return;
2506 error( RtAudioError::SYSTEM_ERROR );
2509 void RtApiJack :: stopStream( void )
2512 if ( stream_.state == STREAM_STOPPED ) {
2513 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2514 error( RtAudioError::WARNING );
2518 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2519 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2521 if ( handle->drainCounter == 0 ) {
2522 handle->drainCounter = 2;
2523 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2527 jack_deactivate( handle->client );
2528 stream_.state = STREAM_STOPPED;
2531 void RtApiJack :: abortStream( void )
2534 if ( stream_.state == STREAM_STOPPED ) {
2535 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2536 error( RtAudioError::WARNING );
2540 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2541 handle->drainCounter = 2;
2546 // This function will be called by a spawned thread when the user
2547 // callback function signals that the stream should be stopped or
2548 // aborted. It is necessary to handle it this way because the
2549 // callbackEvent() function must return before the jack_deactivate()
2550 // function will return.
2551 static void *jackStopStream( void *ptr )
2553 CallbackInfo *info = (CallbackInfo *) ptr;
2554 RtApiJack *object = (RtApiJack *) info->object;
2556 object->stopStream();
2557 pthread_exit( NULL );
2560 bool RtApiJack :: callbackEvent( unsigned long nframes )
2562 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2563 if ( stream_.state == STREAM_CLOSED ) {
2564 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2565 error( RtAudioError::WARNING );
2568 if ( stream_.bufferSize != nframes ) {
2569 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2570 error( RtAudioError::WARNING );
2574 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2575 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2577 // Check if we were draining the stream and signal is finished.
2578 if ( handle->drainCounter > 3 ) {
2579 ThreadHandle threadId;
2581 stream_.state = STREAM_STOPPING;
2582 if ( handle->internalDrain == true )
2583 pthread_create( &threadId, NULL, jackStopStream, info );
2585 pthread_cond_signal( &handle->condition );
2589 // Invoke user callback first, to get fresh output data.
2590 if ( handle->drainCounter == 0 ) {
2591 RtAudioCallback callback = (RtAudioCallback) info->callback;
2592 double streamTime = getStreamTime();
2593 RtAudioStreamStatus status = 0;
2594 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2595 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2596 handle->xrun[0] = false;
2598 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2599 status |= RTAUDIO_INPUT_OVERFLOW;
2600 handle->xrun[1] = false;
2602 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2603 stream_.bufferSize, streamTime, status, info->userData );
2604 if ( cbReturnValue == 2 ) {
2605 stream_.state = STREAM_STOPPING;
2606 handle->drainCounter = 2;
2608 pthread_create( &id, NULL, jackStopStream, info );
2611 else if ( cbReturnValue == 1 ) {
2612 handle->drainCounter = 1;
2613 handle->internalDrain = true;
2617 jack_default_audio_sample_t *jackbuffer;
2618 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2619 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2621 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2623 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2624 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2625 memset( jackbuffer, 0, bufferBytes );
2629 else if ( stream_.doConvertBuffer[0] ) {
2631 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2633 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2634 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2635 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2638 else { // no buffer conversion
2639 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2640 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2641 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2646 // Don't bother draining input
2647 if ( handle->drainCounter ) {
2648 handle->drainCounter++;
2652 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2654 if ( stream_.doConvertBuffer[1] ) {
2655 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2656 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2657 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2659 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2661 else { // no buffer conversion
2662 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2663 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2664 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2670 RtApi::tickStreamTime();
2673 //******************** End of __UNIX_JACK__ *********************//
2676 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
// The ASIO API is designed around a callback scheme, so this
// implementation is similar to that used for OS-X CoreAudio and Linux
// Jack.  The primary constraint with ASIO is that it only allows
// access to a single driver at a time.  Thus, it is not possible to
// have more than one simultaneous RtAudio stream.
//
// This implementation also requires a number of external ASIO files
// and a few global variables.  The ASIO callback scheme does not
// allow for the passing of user data, so we must create a global
// pointer to our callbackInfo structure.
//
// On unix systems, we make use of a pthread condition variable.
// Since there is no equivalent in Windows, I hacked something based
// on information found in
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2694 #include "asiosys.h"
2696 #include "iasiothiscallresolver.h"
2697 #include "asiodrivers.h"
2700 static AsioDrivers drivers;
2701 static ASIOCallbacks asioCallbacks;
2702 static ASIODriverInfo driverInfo;
2703 static CallbackInfo *asioCallbackInfo;
2704 static bool asioXRun;
2707 int drainCounter; // Tracks callback counts when draining
2708 bool internalDrain; // Indicates if stop is initiated from callback or not.
2709 ASIOBufferInfo *bufferInfos;
2713 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2716 // Function declarations (definitions at end of section)
2717 static const char* getAsioErrorString( ASIOError result );
2718 static void sampleRateChanged( ASIOSampleRate sRate );
2719 static long asioMessages( long selector, long value, void* message, double* opt );
2721 RtApiAsio :: RtApiAsio()
2723 // ASIO cannot run on a multi-threaded appartment. You can call
2724 // CoInitialize beforehand, but it must be for appartment threading
2725 // (in which case, CoInitilialize will return S_FALSE here).
2726 coInitialized_ = false;
2727 HRESULT hr = CoInitialize( NULL );
2729 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2730 error( RtAudioError::WARNING );
2732 coInitialized_ = true;
2734 drivers.removeCurrentDriver();
2735 driverInfo.asioVersion = 2;
2737 // See note in DirectSound implementation about GetDesktopWindow().
2738 driverInfo.sysRef = GetForegroundWindow();
2741 RtApiAsio :: ~RtApiAsio()
2743 if ( stream_.state != STREAM_CLOSED ) closeStream();
2744 if ( coInitialized_ ) CoUninitialize();
2747 unsigned int RtApiAsio :: getDeviceCount( void )
2749 return (unsigned int) drivers.asioGetNumDev();
2752 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2754 RtAudio::DeviceInfo info;
2755 info.probed = false;
2758 unsigned int nDevices = getDeviceCount();
2759 if ( nDevices == 0 ) {
2760 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2761 error( RtAudioError::INVALID_USE );
2765 if ( device >= nDevices ) {
2766 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2767 error( RtAudioError::INVALID_USE );
2771 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2772 if ( stream_.state != STREAM_CLOSED ) {
2773 if ( device >= devices_.size() ) {
2774 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2775 error( RtAudioError::WARNING );
2778 return devices_[ device ];
2781 char driverName[32];
2782 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2783 if ( result != ASE_OK ) {
2784 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2785 errorText_ = errorStream_.str();
2786 error( RtAudioError::WARNING );
2790 info.name = driverName;
2792 if ( !drivers.loadDriver( driverName ) ) {
2793 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2794 errorText_ = errorStream_.str();
2795 error( RtAudioError::WARNING );
2799 result = ASIOInit( &driverInfo );
2800 if ( result != ASE_OK ) {
2801 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2802 errorText_ = errorStream_.str();
2803 error( RtAudioError::WARNING );
2807 // Determine the device channel information.
2808 long inputChannels, outputChannels;
2809 result = ASIOGetChannels( &inputChannels, &outputChannels );
2810 if ( result != ASE_OK ) {
2811 drivers.removeCurrentDriver();
2812 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2813 errorText_ = errorStream_.str();
2814 error( RtAudioError::WARNING );
2818 info.outputChannels = outputChannels;
2819 info.inputChannels = inputChannels;
2820 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2821 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2823 // Determine the supported sample rates.
2824 info.sampleRates.clear();
2825 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2826 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2827 if ( result == ASE_OK ) {
2828 info.sampleRates.push_back( SAMPLE_RATES[i] );
2830 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2831 info.preferredSampleRate = SAMPLE_RATES[i];
2835 // Determine supported data types ... just check first channel and assume rest are the same.
2836 ASIOChannelInfo channelInfo;
2837 channelInfo.channel = 0;
2838 channelInfo.isInput = true;
2839 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2840 result = ASIOGetChannelInfo( &channelInfo );
2841 if ( result != ASE_OK ) {
2842 drivers.removeCurrentDriver();
2843 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2844 errorText_ = errorStream_.str();
2845 error( RtAudioError::WARNING );
2849 info.nativeFormats = 0;
2850 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2851 info.nativeFormats |= RTAUDIO_SINT16;
2852 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2853 info.nativeFormats |= RTAUDIO_SINT32;
2854 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2855 info.nativeFormats |= RTAUDIO_FLOAT32;
2856 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2857 info.nativeFormats |= RTAUDIO_FLOAT64;
2858 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2859 info.nativeFormats |= RTAUDIO_SINT24;
2861 if ( info.outputChannels > 0 )
2862 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2863 if ( info.inputChannels > 0 )
2864 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2867 drivers.removeCurrentDriver();
// ASIO driver callback: invoked by the driver each time a buffer half is
// ready. 'index' selects the active half of the double buffer. Routes the
// event to the RtApiAsio instance stored in the global asioCallbackInfo.
2871 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2873 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2874 object->callbackEvent( index );
// Probe and cache RtAudio::DeviceInfo for every ASIO device into devices_.
// Called before opening a stream because getDeviceInfo() cannot load a
// second driver while a stream is open (ASIO allows only one active driver).
2877 void RtApiAsio :: saveDeviceInfo( void )
2881 unsigned int nDevices = getDeviceCount();
2882 devices_.resize( nDevices );
2883 for ( unsigned int i=0; i<nDevices; i++ )
2884 devices_[i] = getDeviceInfo( i );
// Probe and open the given ASIO device for OUTPUT or INPUT. For a duplex
// stream ASIO requires a single driver, so when the input half is opened
// after the output half (isDuplexInput) the already-loaded driver and the
// established buffer size are reused. On success the stream structures
// (formats, conversion flags, user/device buffers, callbacks, latency) are
// initialized and the stream is left in the STREAM_STOPPED state.
2887 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2888 unsigned int firstChannel, unsigned int sampleRate,
2889 RtAudioFormat format, unsigned int *bufferSize,
2890 RtAudio::StreamOptions *options )
2891 {
2893 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2895 // For ASIO, a duplex stream MUST use the same driver.
2896 if ( isDuplexInput && stream_.device[0] != device ) {
2897 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2901 char driverName[32];
2902 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2903 if ( result != ASE_OK ) {
2904 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2905 errorText_ = errorStream_.str();
2909 // Only load the driver once for duplex stream.
2910 if ( !isDuplexInput ) {
2911 // The getDeviceInfo() function will not work when a stream is open
2912 // because ASIO does not allow multiple devices to run at the same
2913 // time. Thus, we'll probe the system before opening a stream and
2914 // save the results for use by getDeviceInfo().
2915 this->saveDeviceInfo();
2917 if ( !drivers.loadDriver( driverName ) ) {
2918 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2919 errorText_ = errorStream_.str();
2923 result = ASIOInit( &driverInfo );
2924 if ( result != ASE_OK ) {
2925 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2926 errorText_ = errorStream_.str();
2931 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2932 bool buffersAllocated = false;
2933 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2934 unsigned int nChannels;
2937 // Check the device channel count.
2938 long inputChannels, outputChannels;
2939 result = ASIOGetChannels( &inputChannels, &outputChannels );
2940 if ( result != ASE_OK ) {
2941 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2942 errorText_ = errorStream_.str();
2946 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2947 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2948 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2949 errorText_ = errorStream_.str();
2952 stream_.nDeviceChannels[mode] = channels;
2953 stream_.nUserChannels[mode] = channels;
2954 stream_.channelOffset[mode] = firstChannel;
2956 // Verify the sample rate is supported.
2957 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2958 if ( result != ASE_OK ) {
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2960 errorText_ = errorStream_.str();
2964 // Get the current sample rate
2965 ASIOSampleRate currentRate;
// FIX: the argument had been corrupted to "¤tRate" ("&curren" mangled
// into the '¤' HTML entity); ASIOGetSampleRate takes ASIOSampleRate*.
2966 result = ASIOGetSampleRate( &currentRate );
2967 if ( result != ASE_OK ) {
2968 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2969 errorText_ = errorStream_.str();
2973 // Set the sample rate only if necessary
2974 if ( currentRate != sampleRate ) {
2975 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2976 if ( result != ASE_OK ) {
2977 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2978 errorText_ = errorStream_.str();
2983 // Determine the driver data type.
2984 ASIOChannelInfo channelInfo;
2985 channelInfo.channel = 0;
2986 if ( mode == OUTPUT ) channelInfo.isInput = false;
2987 else channelInfo.isInput = true;
2988 result = ASIOGetChannelInfo( &channelInfo );
2989 if ( result != ASE_OK ) {
2990 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2991 errorText_ = errorStream_.str();
2995 // Assuming WINDOWS host is always little-endian.
2996 stream_.doByteSwap[mode] = false;
2997 stream_.userFormat = format;
2998 stream_.deviceFormat[mode] = 0;
2999 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3000 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3001 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3003 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3004 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3005 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3007 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3008 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3009 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3011 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3012 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3013 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3015 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3016 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3017 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3020 if ( stream_.deviceFormat[mode] == 0 ) {
3021 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3022 errorText_ = errorStream_.str();
3026 // Set the buffer size. For a duplex stream, this will end up
3027 // setting the buffer size based on the input constraints, which
3029 long minSize, maxSize, preferSize, granularity;
3030 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3031 if ( result != ASE_OK ) {
3032 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3033 errorText_ = errorStream_.str();
3037 if ( isDuplexInput ) {
3038 // When this is the duplex input (output was opened before), then we have to use the same
3039 // buffersize as the output, because it might use the preferred buffer size, which most
3040 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3041 // So instead of throwing an error, make them equal. The caller uses the reference
3042 // to the "bufferSize" param as usual to set up processing buffers.
3044 *bufferSize = stream_.bufferSize;
3047 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3048 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3049 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3050 else if ( granularity == -1 ) {
3051 // Make sure bufferSize is a power of two.
3052 int log2_of_min_size = 0;
3053 int log2_of_max_size = 0;
3055 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3056 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3057 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3060 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3061 int min_delta_num = log2_of_min_size;
3063 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3064 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3065 if (current_delta < min_delta) {
3066 min_delta = current_delta;
3071 *bufferSize = ( (unsigned int)1 << min_delta_num );
3072 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3073 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3075 else if ( granularity != 0 ) {
3076 // Set to an even multiple of granularity, rounding up.
3077 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3082 // we don't use it anymore, see above!
3083 // Just left it here for the case...
3084 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3085 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3090 stream_.bufferSize = *bufferSize;
3091 stream_.nBuffers = 2;
3093 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3094 else stream_.userInterleaved = true;
3096 // ASIO always uses non-interleaved buffers.
3097 stream_.deviceInterleaved[mode] = false;
3099 // Allocate, if necessary, our AsioHandle structure for the stream.
3100 if ( handle == 0 ) {
3102 handle = new AsioHandle;
3104 catch ( std::bad_alloc& ) {
3105 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3108 handle->bufferInfos = 0;
3110 // Create a manual-reset event.
3111 handle->condition = CreateEvent( NULL, // no security
3112 TRUE, // manual-reset
3113 FALSE, // non-signaled initially
3115 stream_.apiHandle = (void *) handle;
3118 // Create the ASIO internal buffers. Since RtAudio sets up input
3119 // and output separately, we'll have to dispose of previously
3120 // created output buffers for a duplex stream.
3121 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3122 ASIODisposeBuffers();
3123 if ( handle->bufferInfos ) free( handle->bufferInfos );
3126 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3128 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3129 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3130 if ( handle->bufferInfos == NULL ) {
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3132 errorText_ = errorStream_.str();
3136 ASIOBufferInfo *infos;
3137 infos = handle->bufferInfos;
3138 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3139 infos->isInput = ASIOFalse;
3140 infos->channelNum = i + stream_.channelOffset[0];
3141 infos->buffers[0] = infos->buffers[1] = 0;
3143 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3144 infos->isInput = ASIOTrue;
3145 infos->channelNum = i + stream_.channelOffset[1];
3146 infos->buffers[0] = infos->buffers[1] = 0;
3149 // prepare for callbacks
3150 stream_.sampleRate = sampleRate;
3151 stream_.device[mode] = device;
3152 stream_.mode = isDuplexInput ? DUPLEX : mode;
3154 // store this class instance before registering callbacks, that are going to use it
3155 asioCallbackInfo = &stream_.callbackInfo;
3156 stream_.callbackInfo.object = (void *) this;
3158 // Set up the ASIO callback structure and create the ASIO data buffers.
3159 asioCallbacks.bufferSwitch = &bufferSwitch;
3160 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3161 asioCallbacks.asioMessage = &asioMessages;
3162 asioCallbacks.bufferSwitchTimeInfo = NULL;
3163 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3164 if ( result != ASE_OK ) {
3165 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3166 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3167 // in that case, let's be naïve and try that instead
3168 *bufferSize = preferSize;
3169 stream_.bufferSize = *bufferSize;
3170 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3173 if ( result != ASE_OK ) {
3174 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3175 errorText_ = errorStream_.str();
3178 buffersAllocated = true;
3179 stream_.state = STREAM_STOPPED;
3181 // Set flags for buffer conversion.
3182 stream_.doConvertBuffer[mode] = false;
3183 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3184 stream_.doConvertBuffer[mode] = true;
3185 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3186 stream_.nUserChannels[mode] > 1 )
3187 stream_.doConvertBuffer[mode] = true;
3189 // Allocate necessary internal buffers
3190 unsigned long bufferBytes;
3191 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3192 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3193 if ( stream_.userBuffer[mode] == NULL ) {
3194 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3198 if ( stream_.doConvertBuffer[mode] ) {
3200 bool makeBuffer = true;
3201 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3202 if ( isDuplexInput && stream_.deviceBuffer ) {
3203 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3204 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3208 bufferBytes *= *bufferSize;
3209 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3210 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3211 if ( stream_.deviceBuffer == NULL ) {
3212 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3218 // Determine device latencies
3219 long inputLatency, outputLatency;
3220 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3221 if ( result != ASE_OK ) {
3222 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3223 errorText_ = errorStream_.str();
3224 error( RtAudioError::WARNING); // warn but don't fail
3227 stream_.latency[0] = outputLatency;
3228 stream_.latency[1] = inputLatency;
3231 // Setup the buffer conversion information structure. We don't use
3232 // buffers to do channel offsets, so we override that parameter
3234 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3239 if ( !isDuplexInput ) {
3240 // the cleanup for error in the duplex input, is done by RtApi::openStream
3241 // So we clean up for single channel only
3243 if ( buffersAllocated )
3244 ASIODisposeBuffers();
3246 drivers.removeCurrentDriver();
3249 CloseHandle( handle->condition );
3250 if ( handle->bufferInfos )
3251 free( handle->bufferInfos );
3254 stream_.apiHandle = 0;
3258 if ( stream_.userBuffer[mode] ) {
3259 free( stream_.userBuffer[mode] );
3260 stream_.userBuffer[mode] = 0;
3263 if ( stream_.deviceBuffer ) {
3264 free( stream_.deviceBuffer );
3265 stream_.deviceBuffer = 0;
3270 }
// Close an open stream: dispose the driver's ASIO buffers, unload the
// driver, release the AsioHandle (Windows event + bufferInfos array), free
// user and device buffers, then reset mode/state to UNINITIALIZED/CLOSED.
3272 void RtApiAsio :: closeStream()
3274 if ( stream_.state == STREAM_CLOSED ) {
3275 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3276 error( RtAudioError::WARNING );
// A running stream is marked stopped before driver buffers are disposed.
3280 if ( stream_.state == STREAM_RUNNING ) {
3281 stream_.state = STREAM_STOPPED;
3284 ASIODisposeBuffers();
3285 drivers.removeCurrentDriver();
3287 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3289 CloseHandle( handle->condition );
3290 if ( handle->bufferInfos )
3291 free( handle->bufferInfos );
3293 stream_.apiHandle = 0;
// Free per-direction user buffers (index 0 = output, 1 = input).
3296 for ( int i=0; i<2; i++ ) {
3297 if ( stream_.userBuffer[i] ) {
3298 free( stream_.userBuffer[i] );
3299 stream_.userBuffer[i] = 0;
3303 if ( stream_.deviceBuffer ) {
3304 free( stream_.deviceBuffer );
3305 stream_.deviceBuffer = 0;
3308 stream_.mode = UNINITIALIZED;
3309 stream_.state = STREAM_CLOSED;
// File-scope flag cleared by startStream(); presumably set when a stop is
// initiated from the callback thread — the setter is not visible in this
// chunk, confirm against the full source.
3312 bool stopThreadCalled = false;
// Start the open stream via ASIOStart(), reset the drain bookkeeping in the
// AsioHandle plus the file-scope stop-thread flag, and mark the stream
// RUNNING. Emits WARNING if already running; SYSTEM_ERROR if ASIOStart fails.
3314 void RtApiAsio :: startStream()
3317 if ( stream_.state == STREAM_RUNNING ) {
3318 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3319 error( RtAudioError::WARNING );
3323 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3324 ASIOError result = ASIOStart();
3325 if ( result != ASE_OK ) {
3326 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3327 errorText_ = errorStream_.str();
// Reset drain state and the manual-reset event that stopStream() waits on.
3331 handle->drainCounter = 0;
3332 handle->internalDrain = false;
3333 ResetEvent( handle->condition );
3334 stream_.state = STREAM_RUNNING;
3338 stopThreadCalled = false;
3340 if ( result == ASE_OK ) return;
3341 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream. For output/duplex, request a drain (drainCounter = 2)
// and block on the handle's event until callbackEvent() signals the output
// has been flushed; then mark the stream stopped and call ASIOStop().
3344 void RtApiAsio :: stopStream()
3347 if ( stream_.state == STREAM_STOPPED ) {
3348 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3349 error( RtAudioError::WARNING );
3353 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3354 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3355 if ( handle->drainCounter == 0 ) {
3356 handle->drainCounter = 2;
3357 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3361 stream_.state = STREAM_STOPPED;
3363 ASIOError result = ASIOStop();
3364 if ( result != ASE_OK ) {
3365 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3366 errorText_ = errorStream_.str();
3369 if ( result == ASE_OK ) return;
3370 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream. Deliberately behaves like stopStream(): an immediate
// abort was observed to leave residual sound in some drivers' buffers
// (see the retained commented-out code below).
3373 void RtApiAsio :: abortStream()
3376 if ( stream_.state == STREAM_STOPPED ) {
3377 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3378 error( RtAudioError::WARNING );
3382 // The following lines were commented-out because some behavior was
3383 // noted where the device buffers need to be zeroed to avoid
3384 // continuing sound, even when the device buffers are completely
3385 // disposed. So now, calling abort is the same as calling stop.
3386 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3387 // handle->drainCounter = 2;
3391 // This function will be called by a spawned thread when the user
3392 // callback function signals that the stream should be stopped or
3393 // aborted. It is necessary to handle it this way because the
3394 // callbackEvent() function must return before the ASIOStop()
3395 // function will return.
3396 static unsigned __stdcall asioStopStream( void *ptr )
// Thread entry point: recover the RtApiAsio instance from the CallbackInfo
// passed at _beginthreadex() time and stop the stream from outside the
// driver's callback context.
3398 CallbackInfo *info = (CallbackInfo *) ptr;
3399 RtApiAsio *object = (RtApiAsio *) info->object;
3401 object->stopStream();
// Core per-buffer processing, called from bufferSwitch() with the index of
// the ready half of the double buffer. Sequence: handle pending drain/stop
// signaling, invoke the user callback for fresh data, move output data
// user->driver and input data driver->user (with format conversion and
// byte swapping as configured at open time), then advance stream time.
3406 bool RtApiAsio :: callbackEvent( long bufferIndex )
3408 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3409 if ( stream_.state == STREAM_CLOSED ) {
3410 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3411 error( RtAudioError::WARNING );
3415 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3416 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3418 // Check if we were draining the stream and signal if finished.
3419 if ( handle->drainCounter > 3 ) {
3421 stream_.state = STREAM_STOPPING;
3422 if ( handle->internalDrain == false )
3423 SetEvent( handle->condition );
3424 else { // spawn a thread to stop the stream
3426 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3427 &stream_.callbackInfo, 0, &threadId );
3432 // Invoke user callback to get fresh output data UNLESS we are
3434 if ( handle->drainCounter == 0 ) {
3435 RtAudioCallback callback = (RtAudioCallback) info->callback;
3436 double streamTime = getStreamTime();
3437 RtAudioStreamStatus status = 0;
3438 if ( stream_.mode != INPUT && asioXRun == true ) {
3439 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3442 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3443 status |= RTAUDIO_INPUT_OVERFLOW;
3446 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3447 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort via a spawned stop thread; 1 = drain output
// internally, then stop.
3448 if ( cbReturnValue == 2 ) {
3449 stream_.state = STREAM_STOPPING;
3450 handle->drainCounter = 2;
3452 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3453 &stream_.callbackInfo, 0, &threadId );
3456 else if ( cbReturnValue == 1 ) {
3457 handle->drainCounter = 1;
3458 handle->internalDrain = true;
3462 unsigned int nChannels, bufferBytes, i, j;
3463 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3464 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3466 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3468 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3470 for ( i=0, j=0; i<nChannels; i++ ) {
3471 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3472 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3476 else if ( stream_.doConvertBuffer[0] ) {
// Convert (and optionally byte-swap) into deviceBuffer, then scatter the
// non-interleaved channels into the driver's output buffers.
3478 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3479 if ( stream_.doByteSwap[0] )
3480 byteSwapBuffer( stream_.deviceBuffer,
3481 stream_.bufferSize * stream_.nDeviceChannels[0],
3482 stream_.deviceFormat[0] );
3484 for ( i=0, j=0; i<nChannels; i++ ) {
3485 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3486 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3487 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3493 if ( stream_.doByteSwap[0] )
3494 byteSwapBuffer( stream_.userBuffer[0],
3495 stream_.bufferSize * stream_.nUserChannels[0],
3496 stream_.userFormat );
3498 for ( i=0, j=0; i<nChannels; i++ ) {
3499 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3500 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3501 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3507 // Don't bother draining input
3508 if ( handle->drainCounter ) {
3509 handle->drainCounter++;
3513 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3515 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3517 if (stream_.doConvertBuffer[1]) {
3519 // Always interleave ASIO input data.
3520 for ( i=0, j=0; i<nChannels; i++ ) {
3521 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3522 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3523 handle->bufferInfos[i].buffers[bufferIndex],
3527 if ( stream_.doByteSwap[1] )
3528 byteSwapBuffer( stream_.deviceBuffer,
3529 stream_.bufferSize * stream_.nDeviceChannels[1],
3530 stream_.deviceFormat[1] );
3531 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3535 for ( i=0, j=0; i<nChannels; i++ ) {
3536 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3537 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3538 handle->bufferInfos[i].buffers[bufferIndex],
3543 if ( stream_.doByteSwap[1] )
3544 byteSwapBuffer( stream_.userBuffer[1],
3545 stream_.bufferSize * stream_.nUserChannels[1],
3546 stream_.userFormat );
3551 // The following call was suggested by Malte Clasen. While the API
3552 // documentation indicates it should not be required, some device
3553 // drivers apparently do not function correctly without it.
3556 RtApi::tickStreamTime();
// ASIO driver callback: the driver reports a sample rate change (typically
// from external sync). RtAudio cannot adapt on the fly, so the stream is
// stopped and a notice is printed to std::cerr.
3560 static void sampleRateChanged( ASIOSampleRate sRate )
3562 // The ASIO documentation says that this usually only happens during
3563 // external sync. Audio processing is not stopped by the driver,
3564 // actual sample rate might not have even changed, maybe only the
3565 // sample rate status of an AES/EBU or S/PDIF digital input at the
3568 RtApi *object = (RtApi *) asioCallbackInfo->object;
3570 object->stopStream();
3572 catch ( RtAudioError &exception ) {
3573 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3577 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// Host message handler registered with the driver in asioCallbacks.
// Responds to driver queries and notifications (selector support, reset,
// resync, latency change, engine version, time-info/time-code support).
// Informational cases print a notice to std::cerr.
3580 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3584 switch( selector ) {
3585 case kAsioSelectorSupported:
3586 if ( value == kAsioResetRequest
3587 || value == kAsioEngineVersion
3588 || value == kAsioResyncRequest
3589 || value == kAsioLatenciesChanged
3590 // The following three were added for ASIO 2.0, you don't
3591 // necessarily have to support them.
3592 || value == kAsioSupportsTimeInfo
3593 || value == kAsioSupportsTimeCode
3594 || value == kAsioSupportsInputMonitor)
3597 case kAsioResetRequest:
3598 // Defer the task and perform the reset of the driver during the
3599 // next "safe" situation. You cannot reset the driver right now,
3600 // as this code is called from the driver. Reset the driver is
3601 // done by completely destruct is. I.e. ASIOStop(),
3602 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3604 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3607 case kAsioResyncRequest:
3608 // This informs the application that the driver encountered some
3609 // non-fatal data loss. It is used for synchronization purposes
3610 // of different media. Added mainly to work around the Win16Mutex
3611 // problems in Windows 95/98 with the Windows Multimedia system,
3612 // which could lose data because the Mutex was held too long by
3613 // another thread. However a driver can issue it in other
3615 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3619 case kAsioLatenciesChanged:
3620 // This will inform the host application that the drivers were
3621 // latencies changed. Beware, it this does not mean that the
3622 // buffer sizes have changed! You might need to update internal
3624 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3627 case kAsioEngineVersion:
3628 // Return the supported ASIO version of the host application. If
3629 // a host application does not implement this selector, ASIO 1.0
3630 // is assumed by the driver.
3633 case kAsioSupportsTimeInfo:
3634 // Informs the driver whether the
3635 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3636 // For compatibility with ASIO 1.0 drivers the host application
3637 // should always support the "old" bufferSwitch method, too.
3640 case kAsioSupportsTimeCode:
3641 // Informs the driver whether application is interested in time
3642 // code info. If an application does not need to know about time
3643 // code, the driver has less work to do.
3650 static const char* getAsioErrorString( ASIOError result )
3658 static const Messages m[] =
3660 { ASE_NotPresent, "Hardware input or output is not present or available." },
3661 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3662 { ASE_InvalidParameter, "Invalid input parameter." },
3663 { ASE_InvalidMode, "Invalid mode." },
3664 { ASE_SPNotAdvancing, "Sample position not advancing." },
3665 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3666 { ASE_NoMemory, "Not enough memory to complete the request." }
3669 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3670 if ( m[i].value == result ) return m[i].message;
3672 return "Unknown error.";
3675 //******************** End of __WINDOWS_ASIO__ *********************//
3679 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3681 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3682 // - Introduces support for the Windows WASAPI API
3683 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3684 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3685 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3690 #include <audioclient.h>
3692 #include <mmdeviceapi.h>
3693 #include <functiondiscoverykeys_devpkey.h>
3695 //=============================================================================
3697 #define SAFE_RELEASE( objectPtr )\
3700 objectPtr->Release();\
3704 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3706 //-----------------------------------------------------------------------------
3708 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3709 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3710 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3711 // provide intermediate storage for read / write synchronization.
3725 // sets the length of the internal ring buffer: allocates a zeroed block of
3725 // bufferSize elements, each formatBytes bytes wide, and records the element
3725 // count. NOTE(review): freeing of any previous buffer_ is not visible in
3725 // this listing — confirm against the full source.
3726 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3729 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3731 bufferSize_ = bufferSize;
3736 // attempt to push a buffer into the ring buffer at the current "in" index
3737 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3739 if ( !buffer || // incoming buffer is NULL
3740 bufferSize == 0 || // incoming buffer has no data
3741 bufferSize > bufferSize_ ) // incoming buffer too large
3746 unsigned int relOutIndex = outIndex_;
3747 unsigned int inIndexEnd = inIndex_ + bufferSize;
3748 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3749 relOutIndex += bufferSize_;
3752 // "in" index can end on the "out" index but cannot begin at it
3753 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3754 return false; // not enough space between "in" index and "out" index
3757 // copy buffer from external to internal
3758 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3759 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3760 int fromInSize = bufferSize - fromZeroSize;
3765 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3766 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3768 case RTAUDIO_SINT16:
3769 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3770 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3772 case RTAUDIO_SINT24:
3773 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3774 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3776 case RTAUDIO_SINT32:
3777 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3778 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3780 case RTAUDIO_FLOAT32:
3781 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3782 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3784 case RTAUDIO_FLOAT64:
3785 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3786 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3790 // update "in" index
3791 inIndex_ += bufferSize;
3792 inIndex_ %= bufferSize_;
3797 // attempt to pull a buffer from the ring buffer from the current "out" index
// Consumer-side read of the WASAPI sample ring buffer: copies bufferSize
// samples (of the given RtAudioFormat) from the internal ring into `buffer`,
// handling wrap-around, and returns false without copying when the request is
// invalid or insufficient data is available.
// NOTE(review): this view of the file is missing interleaved source lines
// (switch header, break statements, closing brace), so comments describe only
// the visible statements.
3798 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
// Reject a null destination, an empty request, or a request larger than the
// entire ring capacity — none of these can ever be satisfied.
3800 if ( !buffer || // incoming buffer is NULL
3801 bufferSize == 0 || // incoming buffer has no data
3802 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index into the same coordinate space as the requested read
// window [outIndex_, outIndexEnd) so the overlap test below is a plain compare.
3807 unsigned int relInIndex = inIndex_;
3808 unsigned int outIndexEnd = outIndex_ + bufferSize;
3809 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3810 relInIndex += bufferSize_;
3813 // "out" index can begin at and end on the "in" index
3814 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3815 return false; // not enough space between "out" index and "in" index
3818 // copy buffer from internal to external
// fromZeroSize: samples that wrap past the end of the ring and are read from
// offset zero; fromOutSize: samples read contiguously starting at outIndex_.
3819 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3820 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3821 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copy pairs: first the contiguous tail at outIndex_, then the
// wrapped head from offset zero (second memcpy is a no-op if nothing wraps).
3826 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3827 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3829 case RTAUDIO_SINT16:
3830 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3831 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3833 case RTAUDIO_SINT24:
3834 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3835 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3837 case RTAUDIO_SINT32:
3838 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3839 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3841 case RTAUDIO_FLOAT32:
3842 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3843 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3845 case RTAUDIO_FLOAT64:
3846 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3847 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3851 // update "out" index
// Advance and wrap the read cursor only after the copy has succeeded.
3852 outIndex_ += bufferSize;
3853 outIndex_ %= bufferSize_;
// Ring capacity in samples (not bytes); both indices are kept < bufferSize_
// by the modulo updates in pushBuffer/pullBuffer.
3860 unsigned int bufferSize_;
// Next write position (producer side) — advanced by pushBuffer.
3861 unsigned int inIndex_;
// Next read position (consumer side) — advanced by pullBuffer.
3862 unsigned int outIndex_;
3865 //-----------------------------------------------------------------------------
3867 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3868 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
3869 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3870 // This sample rate converter works best with conversions between one rate and its multiple.
// Parameters: outBuffer/inBuffer are interleaved sample frames of `format`;
// channelCount frames are copied per output sample; outSampleCount is an
// out-parameter set to round(inSampleCount * outSampleRate / inSampleRate).
// When one rate is an integer multiple of the other, samples are copied by
// nearest-index selection; otherwise linear interpolation between adjacent
// input frames is performed per channel.
3871 void convertBufferWasapi( char* outBuffer,
3872 const char* inBuffer,
3873 const unsigned int& channelCount,
3874 const unsigned int& inSampleRate,
3875 const unsigned int& outSampleRate,
3876 const unsigned int& inSampleCount,
3877 unsigned int& outSampleCount,
3878 const RtAudioFormat& format )
3880 // calculate the new outSampleCount and relative sampleStep
// NOTE(review): float ratios of large integer rates lose precision; the
// floor()==ratio integral-multiple test below relies on the quotient being
// exactly representable — confirm for the rate pairs in use.
3881 float sampleRatio = ( float ) outSampleRate / inSampleRate;
3882 float sampleRatioInv = ( float ) 1 / sampleRatio;
3883 float sampleStep = 1.0f / sampleRatio;
3884 float inSampleFraction = 0.0f;
3886 // for cmath functions
3887 using namespace std;
3889 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
3891 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
3892 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
3894 // frame-by-frame, copy each relative input sample into its corresponding output sample
3895 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
// Truncate the running fractional position to select the source frame.
3897 unsigned int inSample = ( unsigned int ) inSampleFraction;
3902 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
3904 case RTAUDIO_SINT16:
3905 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
3907 case RTAUDIO_SINT24:
3908 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
3910 case RTAUDIO_SINT32:
3911 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
3913 case RTAUDIO_FLOAT32:
3914 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
3916 case RTAUDIO_FLOAT64:
3917 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
3921 // jump to next in sample
3922 inSampleFraction += sampleStep;
3925 else // else interpolate
3927 // frame-by-frame, copy each relative input sample into its corresponding output sample
// NOTE(review): `toSample` below reads the frame at inSample + 1; for the
// final output sample this can index one frame past the last input frame —
// confirm callers over-allocate/pad inBuffer by at least one frame.
3928 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
3930 unsigned int inSample = ( unsigned int ) inSampleFraction;
// Fractional distance between the two source frames, used as the lerp weight.
3931 float inSampleDec = inSampleFraction - inSample;
3932 unsigned int frameInSample = inSample * channelCount;
3933 unsigned int frameOutSample = outSample * channelCount;
3939 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3941 char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
3942 char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
3943 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
3944 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3948 case RTAUDIO_SINT16:
3950 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3952 short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
3953 short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
3954 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
3955 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3959 case RTAUDIO_SINT24:
3961 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3963 int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
3964 int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
3965 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
3966 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3970 case RTAUDIO_SINT32:
3972 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3974 int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
3975 int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
3976 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
3977 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3981 case RTAUDIO_FLOAT32:
3983 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3985 float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
3986 float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
3987 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
3988 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3992 case RTAUDIO_FLOAT64:
3994 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3996 double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
3997 double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
3998 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
3999 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
4005 // jump to next in sample
4006 inSampleFraction += sampleStep;
4011 //-----------------------------------------------------------------------------
4013 // A structure to hold various information related to the WASAPI implementation.
// Per-stream COM interface pointers plus the Win32 event handles used for
// event-driven buffering. All members are NULL-initialized here; they are
// populated during stream setup and released/closed in
// RtApiWasapi::closeStream().
4016 IAudioClient* captureAudioClient;
4017 IAudioClient* renderAudioClient;
4018 IAudioCaptureClient* captureClient;
4019 IAudioRenderClient* renderClient;
// Auto-reset events signaled by WASAPI when a buffer is ready (capture) or
// needed (render).
4020 HANDLE captureEvent;
// Default constructor: every handle starts out NULL so cleanup code can test
// each member before releasing it.
4024 : captureAudioClient( NULL ),
4025 renderAudioClient( NULL ),
4026 captureClient( NULL ),
4027 renderClient( NULL ),
4028 captureEvent( NULL ),
4029 renderEvent( NULL ) {}
4032 //=============================================================================
// Constructor: initializes COM for this thread and creates the
// IMMDeviceEnumerator used by all device-query methods. coInitialized_
// records whether CoInitialize() succeeded so the destructor only calls
// CoUninitialize() when this object actually initialized COM.
4034 RtApiWasapi::RtApiWasapi()
4035 : coInitialized_( false ), deviceEnumerator_( NULL )
4037 // WASAPI can run either apartment or multi-threaded
4038 HRESULT hr = CoInitialize( NULL );
// Note: S_FALSE (COM already initialized on this thread) is not a failure
// and also sets coInitialized_, which still requires a matching
// CoUninitialize() per COM rules.
4039 if ( !FAILED( hr ) )
4040 coInitialized_ = true;
4042 // Instantiate device enumerator
4043 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4044 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4045 ( void** ) &deviceEnumerator_ );
// A failed enumerator leaves deviceEnumerator_ NULL; subsequent device
// queries would dereference it — the error below reports the condition.
4047 if ( FAILED( hr ) ) {
4048 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4049 error( RtAudioError::DRIVER_ERROR );
4053 //-----------------------------------------------------------------------------
// Destructor: closes any still-open stream, releases the device enumerator,
// and balances the constructor's CoInitialize() when it succeeded.
4055 RtApiWasapi::~RtApiWasapi()
4057 if ( stream_.state != STREAM_CLOSED )
4060 SAFE_RELEASE( deviceEnumerator_ );
4062 // If this object previously called CoInitialize()
4063 if ( coInitialized_ )
4067 //=============================================================================
// Returns the total number of active WASAPI endpoints: active render devices
// plus active capture devices. On any enumeration failure, reports a
// DRIVER_ERROR via error() instead of returning the sum.
4069 unsigned int RtApiWasapi::getDeviceCount( void )
4071 unsigned int captureDeviceCount = 0;
4072 unsigned int renderDeviceCount = 0;
4074 IMMDeviceCollection* captureDevices = NULL;
4075 IMMDeviceCollection* renderDevices = NULL;
4077 // Count capture devices
4079 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4080 if ( FAILED( hr ) ) {
4081 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4085 hr = captureDevices->GetCount( &captureDeviceCount );
4086 if ( FAILED( hr ) ) {
4087 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4091 // Count render devices
4092 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4093 if ( FAILED( hr ) ) {
4094 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4098 hr = renderDevices->GetCount( &renderDeviceCount );
4099 if ( FAILED( hr ) ) {
4100 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4105 // release all references
4106 SAFE_RELEASE( captureDevices );
4107 SAFE_RELEASE( renderDevices );
// Success is signaled by an empty errorText_; otherwise fall through to
// report the driver error recorded above.
4109 if ( errorText_.empty() )
4110 return captureDeviceCount + renderDeviceCount;
4112 error( RtAudioError::DRIVER_ERROR );
4116 //-----------------------------------------------------------------------------
// Probes one WASAPI endpoint and fills an RtAudio::DeviceInfo. Device
// indexing convention (shared with getDeviceCount/probeDeviceOpen): indices
// [0, renderDeviceCount) address render devices, and
// [renderDeviceCount, renderDeviceCount + captureDeviceCount) address
// capture devices. All COM references acquired here are released in the
// cleanup block at the end.
4118 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4120 RtAudio::DeviceInfo info;
4121 unsigned int captureDeviceCount = 0;
4122 unsigned int renderDeviceCount = 0;
4123 std::string defaultDeviceName;
4124 bool isCaptureDevice = false;
4126 PROPVARIANT deviceNameProp;
4127 PROPVARIANT defaultDeviceNameProp;
4129 IMMDeviceCollection* captureDevices = NULL;
4130 IMMDeviceCollection* renderDevices = NULL;
4131 IMMDevice* devicePtr = NULL;
4132 IMMDevice* defaultDevicePtr = NULL;
4133 IAudioClient* audioClient = NULL;
4134 IPropertyStore* devicePropStore = NULL;
4135 IPropertyStore* defaultDevicePropStore = NULL;
4137 WAVEFORMATEX* deviceFormat = NULL;
4138 WAVEFORMATEX* closestMatchFormat = NULL;
// probed stays false unless the whole probe sequence completes.
4141 info.probed = false;
4143 // Count capture devices
4145 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4146 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4147 if ( FAILED( hr ) ) {
4148 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4152 hr = captureDevices->GetCount( &captureDeviceCount );
4153 if ( FAILED( hr ) ) {
4154 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4158 // Count render devices
4159 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4160 if ( FAILED( hr ) ) {
4161 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4165 hr = renderDevices->GetCount( &renderDeviceCount );
4166 if ( FAILED( hr ) ) {
4167 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4171 // validate device index
4172 if ( device >= captureDeviceCount + renderDeviceCount ) {
4173 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4174 errorType = RtAudioError::INVALID_USE;
4178 // determine whether index falls within capture or render devices
4179 if ( device >= renderDeviceCount ) {
4180 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4181 if ( FAILED( hr ) ) {
4182 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4185 isCaptureDevice = true;
4188 hr = renderDevices->Item( device, &devicePtr );
4189 if ( FAILED( hr ) ) {
4190 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4193 isCaptureDevice = false;
4196 // get default device name
// The default endpoint for the matching data-flow direction is fetched so
// isDefaultInput/isDefaultOutput can be decided by friendly-name comparison.
4197 if ( isCaptureDevice ) {
4198 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4199 if ( FAILED( hr ) ) {
4200 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4205 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4206 if ( FAILED( hr ) ) {
4207 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4212 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4213 if ( FAILED( hr ) ) {
4214 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
// NOTE(review): PropVariantClear() in the cleanup block below runs even on
// error paths that exit before these PropVariantInit() calls — confirm the
// hidden error-handling lines initialize or skip the PROPVARIANTs.
4217 PropVariantInit( &defaultDeviceNameProp );
4219 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4220 if ( FAILED( hr ) ) {
4221 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4225 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4228 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4229 if ( FAILED( hr ) ) {
4230 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4234 PropVariantInit( &deviceNameProp );
4236 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4237 if ( FAILED( hr ) ) {
4238 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4242 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device flags are determined by name equality with the default
// endpoint of the same direction.
4245 if ( isCaptureDevice ) {
4246 info.isDefaultInput = info.name == defaultDeviceName;
4247 info.isDefaultOutput = false;
4250 info.isDefaultInput = false;
4251 info.isDefaultOutput = info.name == defaultDeviceName;
// Channel counts and native format come from the device's shared-mode mix
// format.
4255 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4256 if ( FAILED( hr ) ) {
4257 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4261 hr = audioClient->GetMixFormat( &deviceFormat );
4262 if ( FAILED( hr ) ) {
4263 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4267 if ( isCaptureDevice ) {
4268 info.inputChannels = deviceFormat->nChannels;
4269 info.outputChannels = 0;
4270 info.duplexChannels = 0;
4273 info.inputChannels = 0;
4274 info.outputChannels = deviceFormat->nChannels;
4275 info.duplexChannels = 0;
4279 info.sampleRates.clear();
4281 // allow support for all sample rates as we have a built-in sample rate converter
4282 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4283 info.sampleRates.push_back( SAMPLE_RATES[i] );
4285 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4288 info.nativeFormats = 0;
// Map the mix format (including WAVE_FORMAT_EXTENSIBLE sub-formats) onto the
// corresponding RTAUDIO_* native-format flag.
4290 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4291 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4292 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4294 if ( deviceFormat->wBitsPerSample == 32 ) {
4295 info.nativeFormats |= RTAUDIO_FLOAT32;
4297 else if ( deviceFormat->wBitsPerSample == 64 ) {
4298 info.nativeFormats |= RTAUDIO_FLOAT64;
4301 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4302 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4303 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4305 if ( deviceFormat->wBitsPerSample == 8 ) {
4306 info.nativeFormats |= RTAUDIO_SINT8;
4308 else if ( deviceFormat->wBitsPerSample == 16 ) {
4309 info.nativeFormats |= RTAUDIO_SINT16;
4311 else if ( deviceFormat->wBitsPerSample == 24 ) {
4312 info.nativeFormats |= RTAUDIO_SINT24;
4314 else if ( deviceFormat->wBitsPerSample == 32 ) {
4315 info.nativeFormats |= RTAUDIO_SINT32;
4323 // release all references
4324 PropVariantClear( &deviceNameProp );
4325 PropVariantClear( &defaultDeviceNameProp );
4327 SAFE_RELEASE( captureDevices );
4328 SAFE_RELEASE( renderDevices );
4329 SAFE_RELEASE( devicePtr );
4330 SAFE_RELEASE( defaultDevicePtr );
4331 SAFE_RELEASE( audioClient );
4332 SAFE_RELEASE( devicePropStore );
4333 SAFE_RELEASE( defaultDevicePropStore );
4335 CoTaskMemFree( deviceFormat );
4336 CoTaskMemFree( closestMatchFormat );
4338 if ( !errorText_.empty() )
4343 //-----------------------------------------------------------------------------
// Returns the index of the first device whose isDefaultOutput flag is set.
// Note: getDeviceCount() is re-evaluated every iteration and getDeviceInfo()
// re-enumerates endpoints, so this scan performs O(n^2) COM work for n
// devices.
4345 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4347 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4348 if ( getDeviceInfo( i ).isDefaultOutput ) {
4356 //-----------------------------------------------------------------------------
// Returns the index of the first device whose isDefaultInput flag is set.
// Same O(n^2) COM-enumeration cost as getDefaultOutputDevice().
4358 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4360 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4361 if ( getDeviceInfo( i ).isDefaultInput ) {
4369 //-----------------------------------------------------------------------------
// Tears down the open stream: stops it if running, releases all WASAPI COM
// interfaces and event handles held in the WasapiHandle, frees the
// user/device conversion buffers, and marks the stream CLOSED.
4371 void RtApiWasapi::closeStream( void )
4373 if ( stream_.state == STREAM_CLOSED ) {
4374 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4375 error( RtAudioError::WARNING );
4379 if ( stream_.state != STREAM_STOPPED )
4382 // clean up stream memory
// NOTE(review): stream_.apiHandle is dereferenced without a NULL check here —
// confirm probeDeviceOpen always allocates it before state leaves
// STREAM_CLOSED.
4383 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4384 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4386 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4387 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4389 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4390 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4392 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4393 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4395 delete ( WasapiHandle* ) stream_.apiHandle;
4396 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0/1 = OUTPUT/INPUT).
4398 for ( int i = 0; i < 2; i++ ) {
4399 if ( stream_.userBuffer[i] ) {
4400 free( stream_.userBuffer[i] );
4401 stream_.userBuffer[i] = 0;
4405 if ( stream_.deviceBuffer ) {
4406 free( stream_.deviceBuffer );
4407 stream_.deviceBuffer = 0;
4410 // update stream state
4411 stream_.state = STREAM_CLOSED;
4414 //-----------------------------------------------------------------------------
// Starts stream processing by spawning the WASAPI worker thread
// (runWasapiThread -> wasapiThread), which performs the actual client
// initialization and callback loop.
4416 void RtApiWasapi::startStream( void )
4420 if ( stream_.state == STREAM_RUNNING ) {
4421 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4422 error( RtAudioError::WARNING );
4426 // update stream state
// NOTE(review): state is set to RUNNING before the thread is created; no
// rollback is visible on the failure path below — confirm intended.
4427 stream_.state = STREAM_RUNNING;
4429 // create WASAPI stream thread
// Created suspended so the priority can be applied before the first
// instruction of the worker runs.
4430 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4432 if ( !stream_.callbackInfo.thread ) {
4433 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4434 error( RtAudioError::THREAD_ERROR );
4437 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4438 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4442 //-----------------------------------------------------------------------------
// Cooperatively stops the stream: signals the worker thread via the
// STREAM_STOPPING state, waits for it to exit, lets the final buffer drain,
// stops both WASAPI clients, and closes the worker thread handle.
4444 void RtApiWasapi::stopStream( void )
4448 if ( stream_.state == STREAM_STOPPED ) {
4449 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4450 error( RtAudioError::WARNING );
4454 // inform stream thread by setting stream state to STREAM_STOPPING
4455 stream_.state = STREAM_STOPPING;
4457 // wait until stream thread is stopped
// The worker thread sets state to STREAM_STOPPED when it exits its loop;
// the loop body here is not visible in this view — presumably it sleeps or
// yields; confirm it is not a hard spin.
4458 while( stream_.state != STREAM_STOPPED ) {
4462 // Wait for the last buffer to play before stopping.
4463 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4465 // stop capture client if applicable
4466 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4467 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4468 if ( FAILED( hr ) ) {
4469 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4470 error( RtAudioError::DRIVER_ERROR );
4475 // stop render client if applicable
4476 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4477 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4478 if ( FAILED( hr ) ) {
4479 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4480 error( RtAudioError::DRIVER_ERROR );
4485 // close thread handle
4486 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4487 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4488 error( RtAudioError::THREAD_ERROR );
4492 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4495 //-----------------------------------------------------------------------------
// Identical to stopStream() except it does not wait for the last buffer to
// play out before stopping the WASAPI clients.
4497 void RtApiWasapi::abortStream( void )
4501 if ( stream_.state == STREAM_STOPPED ) {
4502 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4503 error( RtAudioError::WARNING );
4507 // inform stream thread by setting stream state to STREAM_STOPPING
4508 stream_.state = STREAM_STOPPING;
4510 // wait until stream thread is stopped
4511 while ( stream_.state != STREAM_STOPPED ) {
4515 // stop capture client if applicable
4516 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4517 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4518 if ( FAILED( hr ) ) {
4519 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4520 error( RtAudioError::DRIVER_ERROR );
4525 // stop render client if applicable
4526 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4527 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4528 if ( FAILED( hr ) ) {
4529 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4530 error( RtAudioError::DRIVER_ERROR );
4535 // close thread handle
4536 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4537 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4538 error( RtAudioError::THREAD_ERROR );
4542 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4545 //-----------------------------------------------------------------------------
// Opens one direction (OUTPUT or INPUT) of a WASAPI stream: validates the
// device index against the render-first indexing convention, activates an
// IAudioClient for the endpoint, records channel/latency info, fills in the
// stream_ bookkeeping (mode, formats, conversion flags), and allocates the
// user-side buffer. Returns SUCCESS/FAILURE; on failure the stream is closed
// in the cleanup path.
4547 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4548 unsigned int firstChannel, unsigned int sampleRate,
4549 RtAudioFormat format, unsigned int* bufferSize,
4550 RtAudio::StreamOptions* options )
4552 bool methodResult = FAILURE;
4553 unsigned int captureDeviceCount = 0;
4554 unsigned int renderDeviceCount = 0;
4556 IMMDeviceCollection* captureDevices = NULL;
4557 IMMDeviceCollection* renderDevices = NULL;
4558 IMMDevice* devicePtr = NULL;
4559 WAVEFORMATEX* deviceFormat = NULL;
4560 unsigned int bufferBytes;
4561 stream_.state = STREAM_STOPPED;
4563 // create API Handle if not already created
// Lazily allocated so a duplex open (two probeDeviceOpen calls) shares one
// WasapiHandle; it is deleted in closeStream().
4564 if ( !stream_.apiHandle )
4565 stream_.apiHandle = ( void* ) new WasapiHandle();
4567 // Count capture devices
4569 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4570 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4571 if ( FAILED( hr ) ) {
4572 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4576 hr = captureDevices->GetCount( &captureDeviceCount );
4577 if ( FAILED( hr ) ) {
4578 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4582 // Count render devices
4583 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4584 if ( FAILED( hr ) ) {
4585 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4589 hr = renderDevices->GetCount( &renderDeviceCount );
4590 if ( FAILED( hr ) ) {
4591 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4595 // validate device index
4596 if ( device >= captureDeviceCount + renderDeviceCount ) {
4597 errorType = RtAudioError::INVALID_USE;
4598 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4602 // determine whether index falls within capture or render devices
// Indices >= renderDeviceCount address capture endpoints, which may only be
// opened for INPUT; lower indices address render endpoints (OUTPUT only).
4603 if ( device >= renderDeviceCount ) {
4604 if ( mode != INPUT ) {
4605 errorType = RtAudioError::INVALID_USE;
4606 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4610 // retrieve captureAudioClient from devicePtr
4611 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4613 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4614 if ( FAILED( hr ) ) {
4615 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4619 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4620 NULL, ( void** ) &captureAudioClient );
4621 if ( FAILED( hr ) ) {
4622 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4626 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4627 if ( FAILED( hr ) ) {
4628 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4632 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4633 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4636 if ( mode != OUTPUT ) {
4637 errorType = RtAudioError::INVALID_USE;
4638 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4642 // retrieve renderAudioClient from devicePtr
4643 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4645 hr = renderDevices->Item( device, &devicePtr );
4646 if ( FAILED( hr ) ) {
4647 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4651 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4652 NULL, ( void** ) &renderAudioClient );
4653 if ( FAILED( hr ) ) {
4654 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4658 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4659 if ( FAILED( hr ) ) {
4660 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4664 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4665 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// A second open in the opposite direction upgrades the stream to DUPLEX.
4669 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4670 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4671 stream_.mode = DUPLEX;
4674 stream_.mode = mode;
4677 stream_.device[mode] = device;
4678 stream_.doByteSwap[mode] = false;
4679 stream_.sampleRate = sampleRate;
4680 stream_.bufferSize = *bufferSize;
4681 stream_.nBuffers = 1;
4682 stream_.nUserChannels[mode] = channels;
4683 stream_.channelOffset[mode] = firstChannel;
4684 stream_.userFormat = format;
4685 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4687 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4688 stream_.userInterleaved = false;
4690 stream_.userInterleaved = true;
4691 stream_.deviceInterleaved[mode] = true;
4693 // Set flags for buffer conversion.
4694 stream_.doConvertBuffer[mode] = false;
// NOTE(review): `stream_.nUserChannels != stream_.nDeviceChannels` — if these
// members are arrays this compares their addresses (always true), forcing
// conversion unconditionally; a `[mode]` index on each side looks intended —
// confirm against the member declarations.
4695 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4696 stream_.nUserChannels != stream_.nDeviceChannels )
4697 stream_.doConvertBuffer[mode] = true;
4698 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4699 stream_.nUserChannels[mode] > 1 )
4700 stream_.doConvertBuffer[mode] = true;
4702 if ( stream_.doConvertBuffer[mode] )
4703 setConvertInfo( mode, 0 );
4705 // Allocate necessary internal buffers
4706 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
// calloc so the user buffer starts zeroed (silence).
4708 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4709 if ( !stream_.userBuffer[mode] ) {
4710 errorType = RtAudioError::MEMORY_ERROR;
4711 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4715 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4716 stream_.callbackInfo.priority = 15;
4718 stream_.callbackInfo.priority = 0;
4720 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4721 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4723 methodResult = SUCCESS;
// Cleanup path (also reached by the hidden error exits above): release the
// enumeration references and the mix format allocated by GetMixFormat.
4727 SAFE_RELEASE( captureDevices );
4728 SAFE_RELEASE( renderDevices );
4729 SAFE_RELEASE( devicePtr );
4730 CoTaskMemFree( deviceFormat );
4732 // if method failed, close the stream
4733 if ( methodResult == FAILURE )
4736 if ( !errorText_.empty() )
4738 return methodResult;
4741 //=============================================================================
// Static thread entry point passed to CreateThread in startStream();
// forwards to the instance's wasapiThread() callback loop.
4743 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4746 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static thread entry point: stops the stream from a separate thread so the
// worker does not deadlock waiting on itself.
4751 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4754 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static thread entry point: aborts the stream from a separate thread
// (no drain of the final buffer).
4759 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4762 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4767 //-----------------------------------------------------------------------------
4769 void RtApiWasapi::wasapiThread()
// Stream worker thread. Lazily initializes the WASAPI capture and/or render
// clients for this stream, then loops: pull captured audio, invoke the user
// callback, push rendered audio — until stream_.state becomes STREAM_STOPPING.
// On error, errorText_/errorType are set before control leaves the loop; the
// cleanup code at the bottom frees formats/buffers and marks STREAM_STOPPED.
4771 // as this is a new thread, we must CoInitialize it
4772 CoInitialize( NULL );
// Local aliases into the per-stream WASAPI handle. Any of these may be NULL
// depending on whether the stream was opened for capture, render, or both.
4776 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4777 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4778 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4779 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4780 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4781 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
// Device mix formats (CoTaskMemAlloc'd by GetMixFormat; freed in cleanup) and
// device-rate / stream-rate resampling ratios for each direction.
4783 WAVEFORMATEX* captureFormat = NULL;
4784 WAVEFORMATEX* renderFormat = NULL;
4785 float captureSrRatio = 0.0f;
4786 float renderSrRatio = 0.0f;
4787 WasapiBuffer captureBuffer;
4788 WasapiBuffer renderBuffer;
4790 // declare local stream variables
4791 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4792 BYTE* streamBuffer = NULL;
4793 unsigned long captureFlags = 0;
4794 unsigned int bufferFrameCount = 0;
4795 unsigned int numFramesPadding = 0;
4796 unsigned int convBufferSize = 0;
// Per-iteration handshake flags: callbackPulled = user input buffer is ready,
// callbackPushed = user output was queued to renderBuffer, callbackStopped =
// the callback asked for stop/abort (1 or 2) and a stop thread was spawned.
4797 bool callbackPushed = false;
4798 bool callbackPulled = false;
4799 bool callbackStopped = false;
4800 int callbackResult = 0;
4802 // convBuffer is used to store converted buffers between WASAPI and the user
4803 char* convBuffer = NULL;
4804 unsigned int convBuffSize = 0;
4805 unsigned int deviceBuffSize = 0;
4808 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4810 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): the LoadLibrary/GetProcAddress results are used without a
// NULL check here — confirm AVRT.dll availability is guaranteed on all
// supported Windows versions before relying on this path.
4811 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4813 DWORD taskIndex = 0;
4814 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4815 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4816 FreeLibrary( AvrtDll );
4819 // start capture stream if applicable
4820 if ( captureAudioClient ) {
4821 hr = captureAudioClient->GetMixFormat( &captureFormat );
4822 if ( FAILED( hr ) ) {
4823 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
// Ratio of the device's native rate to the user-requested stream rate; used
// to size the intermediate conversion buffer for resampling.
4827 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4829 // initialize capture stream according to desired buffer size
4830 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
// Convert frames to a REFERENCE_TIME period (100-nanosecond units).
4831 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
// Only initialize the client the first time through; on a restarted stream
// the capture client already exists in the handle.
4833 if ( !captureClient ) {
4834 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4835 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4836 desiredBufferPeriod,
4837 desiredBufferPeriod,
4840 if ( FAILED( hr ) ) {
4841 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4845 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4846 ( void** ) &captureClient );
4847 if ( FAILED( hr ) ) {
4848 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4852 // configure captureEvent to trigger on every available capture buffer
4853 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4854 if ( !captureEvent ) {
4855 errorType = RtAudioError::SYSTEM_ERROR;
4856 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4860 hr = captureAudioClient->SetEventHandle( captureEvent );
4861 if ( FAILED( hr ) ) {
4862 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Publish the newly created client/event back into the shared handle so
// stop/close code can find them.
4866 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4867 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4870 unsigned int inBufferSize = 0;
4871 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4872 if ( FAILED( hr ) ) {
4873 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4877 // scale outBufferSize according to stream->user sample rate ratio
4878 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4879 inBufferSize *= stream_.nDeviceChannels[INPUT];
4881 // set captureBuffer size
// Ring buffer sized to hold one device period plus one user period so the
// producer (WASAPI) and consumer (callback) never collide.
4882 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4884 // reset the capture stream
4885 hr = captureAudioClient->Reset();
4886 if ( FAILED( hr ) ) {
4887 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4891 // start the capture stream
4892 hr = captureAudioClient->Start();
4893 if ( FAILED( hr ) ) {
4894 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4899 // start render stream if applicable
// Mirror of the capture setup above, for the render (output) direction.
4900 if ( renderAudioClient ) {
4901 hr = renderAudioClient->GetMixFormat( &renderFormat );
4902 if ( FAILED( hr ) ) {
4903 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4907 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4909 // initialize render stream according to desired buffer size
4910 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4911 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4913 if ( !renderClient ) {
4914 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4915 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4916 desiredBufferPeriod,
4917 desiredBufferPeriod,
4920 if ( FAILED( hr ) ) {
4921 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4925 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4926 ( void** ) &renderClient );
4927 if ( FAILED( hr ) ) {
4928 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4932 // configure renderEvent to trigger on every available render buffer
4933 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4934 if ( !renderEvent ) {
4935 errorType = RtAudioError::SYSTEM_ERROR;
4936 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4940 hr = renderAudioClient->SetEventHandle( renderEvent );
4941 if ( FAILED( hr ) ) {
4942 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4946 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4947 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4950 unsigned int outBufferSize = 0;
4951 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4952 if ( FAILED( hr ) ) {
4953 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4957 // scale inBufferSize according to user->stream sample rate ratio
4958 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4959 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4961 // set renderBuffer size
4962 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4964 // reset the render stream
4965 hr = renderAudioClient->Reset();
4966 if ( FAILED( hr ) ) {
4967 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4971 // start the render stream
4972 hr = renderAudioClient->Start();
4973 if ( FAILED( hr ) ) {
4974 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the intermediate conversion buffer and the device-format buffer for
// the worst case of the active direction(s); DUPLEX takes the max of both.
4979 if ( stream_.mode == INPUT ) {
4980 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4981 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4983 else if ( stream_.mode == OUTPUT ) {
4984 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4985 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4987 else if ( stream_.mode == DUPLEX ) {
4988 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4989 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4990 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4991 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4994 convBuffer = ( char* ) malloc( convBuffSize );
4995 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4996 if ( !convBuffer || !stream_.deviceBuffer ) {
4997 errorType = RtAudioError::MEMORY_ERROR;
4998 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5002 // stream process loop
5003 while ( stream_.state != STREAM_STOPPING ) {
5004 if ( !callbackPulled ) {
5007 // 1. Pull callback buffer from inputBuffer
5008 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5009 // Convert callback buffer to user format
5011 if ( captureAudioClient ) {
5012 // Pull callback buffer from inputBuffer
// pullBuffer fails (returns false) when the ring buffer does not yet hold a
// full user period; the capture section below then waits for more data.
5013 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5014 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
5015 stream_.deviceFormat[INPUT] );
5017 if ( callbackPulled ) {
5018 // Convert callback buffer to user sample rate
5019 convertBufferWasapi( stream_.deviceBuffer,
5021 stream_.nDeviceChannels[INPUT],
5022 captureFormat->nSamplesPerSec,
5024 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
5026 stream_.deviceFormat[INPUT] );
5028 if ( stream_.doConvertBuffer[INPUT] ) {
5029 // Convert callback buffer to user format
5030 convertBuffer( stream_.userBuffer[INPUT],
5031 stream_.deviceBuffer,
5032 stream_.convertInfo[INPUT] );
5035 // no further conversion, simple copy deviceBuffer to userBuffer
5036 memcpy( stream_.userBuffer[INPUT],
5037 stream_.deviceBuffer,
5038 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5043 // if there is no capture stream, set callbackPulled flag
5044 callbackPulled = true;
5049 // 1. Execute user callback method
5050 // 2. Handle return value from callback
5052 // if callback has not requested the stream to stop
5053 if ( callbackPulled && !callbackStopped ) {
5054 // Execute user callback method
// Report an input overflow to the user when WASAPI flagged a discontinuity
// in the captured data.
5055 callbackResult = callback( stream_.userBuffer[OUTPUT],
5056 stream_.userBuffer[INPUT],
5059 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5060 stream_.callbackInfo.userData );
5062 // Handle return value from callback
// Return value 1 => drain and stop; 2 => abort immediately. Either way the
// stop/abort must run on a separate thread because stopStream()/abortStream()
// join this thread.
5063 if ( callbackResult == 1 ) {
5064 // instantiate a thread to stop this thread
5065 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5066 if ( !threadHandle ) {
5067 errorType = RtAudioError::THREAD_ERROR;
5068 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5071 else if ( !CloseHandle( threadHandle ) ) {
5072 errorType = RtAudioError::THREAD_ERROR;
5073 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5077 callbackStopped = true;
5079 else if ( callbackResult == 2 ) {
5080 // instantiate a thread to stop this thread
5081 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5082 if ( !threadHandle ) {
5083 errorType = RtAudioError::THREAD_ERROR;
5084 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5087 else if ( !CloseHandle( threadHandle ) ) {
5088 errorType = RtAudioError::THREAD_ERROR;
5089 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5093 callbackStopped = true;
5100 // 1. Convert callback buffer to stream format
5101 // 2. Convert callback buffer to stream sample rate and channel count
5102 // 3. Push callback buffer into outputBuffer
5104 if ( renderAudioClient && callbackPulled ) {
5105 if ( stream_.doConvertBuffer[OUTPUT] ) {
5106 // Convert callback buffer to stream format
5107 convertBuffer( stream_.deviceBuffer,
5108 stream_.userBuffer[OUTPUT],
5109 stream_.convertInfo[OUTPUT] );
5113 // Convert callback buffer to stream sample rate
5114 convertBufferWasapi( convBuffer,
5115 stream_.deviceBuffer,
5116 stream_.nDeviceChannels[OUTPUT],
5118 renderFormat->nSamplesPerSec,
5121 stream_.deviceFormat[OUTPUT] );
5123 // Push callback buffer into outputBuffer
// pushBuffer fails (returns false) when the ring buffer has no room; the
// render section below then waits for the device to drain it.
5124 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5125 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5126 stream_.deviceFormat[OUTPUT] );
5129 // if there is no render stream, set callbackPushed flag
5130 callbackPushed = true;
5135 // 1. Get capture buffer from stream
5136 // 2. Push capture buffer into inputBuffer
5137 // 3. If 2. was successful: Release capture buffer
5139 if ( captureAudioClient ) {
5140 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5141 if ( !callbackPulled ) {
5142 WaitForSingleObject( captureEvent, INFINITE );
5145 // Get capture buffer from stream
5146 hr = captureClient->GetBuffer( &streamBuffer,
5148 &captureFlags, NULL, NULL );
5149 if ( FAILED( hr ) ) {
5150 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5154 if ( bufferFrameCount != 0 ) {
5155 // Push capture buffer into inputBuffer
5156 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5157 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5158 stream_.deviceFormat[INPUT] ) )
5160 // Release capture buffer
5161 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5162 if ( FAILED( hr ) ) {
5163 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5169 // Inform WASAPI that capture was unsuccessful
// Releasing zero frames keeps the device buffer intact so the data can be
// retried once the ring buffer has room.
5170 hr = captureClient->ReleaseBuffer( 0 );
5171 if ( FAILED( hr ) ) {
5172 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5179 // Inform WASAPI that capture was unsuccessful
5180 hr = captureClient->ReleaseBuffer( 0 );
5181 if ( FAILED( hr ) ) {
5182 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5190 // 1. Get render buffer from stream
5191 // 2. Pull next buffer from outputBuffer
5192 // 3. If 2. was successful: Fill render buffer with next buffer
5193 // Release render buffer
5195 if ( renderAudioClient ) {
5196 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5197 if ( callbackPulled && !callbackPushed ) {
5198 WaitForSingleObject( renderEvent, INFINITE );
5201 // Get render buffer from stream
5202 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5203 if ( FAILED( hr ) ) {
5204 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5208 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5209 if ( FAILED( hr ) ) {
5210 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Only the unpadded portion of the device buffer is writable this period.
5214 bufferFrameCount -= numFramesPadding;
5216 if ( bufferFrameCount != 0 ) {
5217 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5218 if ( FAILED( hr ) ) {
5219 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5223 // Pull next buffer from outputBuffer
5224 // Fill render buffer with next buffer
5225 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5226 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5227 stream_.deviceFormat[OUTPUT] ) )
5229 // Release render buffer
5230 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5231 if ( FAILED( hr ) ) {
5232 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5238 // Inform WASAPI that render was unsuccessful
5239 hr = renderClient->ReleaseBuffer( 0, 0 );
5240 if ( FAILED( hr ) ) {
5241 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5248 // Inform WASAPI that render was unsuccessful
5249 hr = renderClient->ReleaseBuffer( 0, 0 );
5250 if ( FAILED( hr ) ) {
5251 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5257 // if the callback buffer was pushed to renderBuffer, reset callbackPulled flag
5258 if ( callbackPushed ) {
5259 callbackPulled = false;
// Advance the stream time by one buffer period per completed callback cycle.
5261 RtApi::tickStreamTime();
// Cleanup: free the CoTaskMemAlloc'd mix formats and the conversion buffer,
// then publish the stopped state.
5268 CoTaskMemFree( captureFormat );
5269 CoTaskMemFree( renderFormat );
5271 free ( convBuffer );
5275 // update stream state
5276 stream_.state = STREAM_STOPPED;
5278 if ( errorText_.empty() )
5284 //******************** End of __WINDOWS_WASAPI__ *********************//
5288 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5290 // Modified by Robin Davies, October 2005
5291 // - Improvements to DirectX pointer chasing.
5292 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5293 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5294 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5295 // Changed device query structure for RtAudio 4.0.7, January 2010
5297 #include <mmsystem.h>
5301 #include <algorithm>
5303 #if defined(__MINGW32__)
5304 // missing from latest mingw winapi
5305 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5306 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5307 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5308 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5311 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5313 #ifdef _MSC_VER // if Microsoft Visual C++
5314 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5317 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5319 if ( pointer > bufferSize ) pointer -= bufferSize;
5320 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5321 if ( pointer < earlierPointer ) pointer += bufferSize;
5322 return pointer >= earlierPointer && pointer < laterPointer;
5325 // A structure to hold various information related to the DirectSound
5326 // API implementation.
5328 unsigned int drainCounter; // Tracks callback counts when draining
5329 bool internalDrain; // Indicates if stop is initiated from callback or not.
5333 UINT bufferPointer[2];
5334 DWORD dsBufferSize[2];
5335 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5339 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5342 // Declarations for utility functions, callbacks, and structures
5343 // specific to the DirectSound implementation.
5344 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5345 LPCTSTR description,
5349 static const char* getErrorString( int code );
5351 static unsigned __stdcall callbackHandler( void *ptr );
5360 : found(false) { validId[0] = false; validId[1] = false; }
5363 struct DsProbeData {
5365 std::vector<struct DsDevice>* dsDevices;
5368 RtApiDs :: RtApiDs()
5370 // Dsound will run both-threaded. If CoInitialize fails, then just
5371 // accept whatever the mainline chose for a threading model.
5372 coInitialized_ = false;
5373 HRESULT hr = CoInitialize( NULL );
5374 if ( !FAILED( hr ) ) coInitialized_ = true;
5377 RtApiDs :: ~RtApiDs()
5379 if ( stream_.state != STREAM_CLOSED ) closeStream();
5380 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5383 // The DirectSound default output is always the first device.
5384 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5389 // The DirectSound default input is always the first input device,
5390 // which is the first capture device enumerated.
5391 unsigned int RtApiDs :: getDefaultInputDevice( void )
5396 unsigned int RtApiDs :: getDeviceCount( void )
// Re-enumerates all DirectSound playback and capture devices into dsDevices,
// prunes entries that have disappeared since the last call, and returns the
// number of devices currently known. Enumeration failures are reported as
// WARNINGs and counting continues with whatever was found.
5398 // Set query flag for previously found devices to false, so that we
5399 // can check for any devices that have disappeared.
5400 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5401 dsDevices[i].found = false;
5403 // Query DirectSound devices.
// deviceQueryCallback marks (or appends) each enumerated device in
// probeInfo.dsDevices, flipping its `found` flag back to true.
5404 struct DsProbeData probeInfo;
5405 probeInfo.isInput = false;
5406 probeInfo.dsDevices = &dsDevices;
5407 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5408 if ( FAILED( result ) ) {
5409 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5410 errorText_ = errorStream_.str();
5411 error( RtAudioError::WARNING );
5414 // Query DirectSoundCapture devices.
5415 probeInfo.isInput = true;
5416 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5417 if ( FAILED( result ) ) {
5418 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5419 errorText_ = errorStream_.str();
5420 error( RtAudioError::WARNING );
5423 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Erase-in-loop: the index only advances when no erase occurred, so
// consecutive stale entries are handled correctly.
5424 for ( unsigned int i=0; i<dsDevices.size(); ) {
5425 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5429 return static_cast<unsigned int>(dsDevices.size());
5432 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
// Probes the playback and capture capabilities of the device at index
// `device` and returns a filled RtAudio::DeviceInfo. Probe failures are
// reported as WARNINGs and a partially filled info structure is returned.
// Flow: validate index -> probe output caps -> (goto probeInput) -> probe
// input caps -> merge sample-rate lists -> copy name and return.
5434 RtAudio::DeviceInfo info;
5435 info.probed = false;
5437 if ( dsDevices.size() == 0 ) {
5438 // Force a query of all devices
5440 if ( dsDevices.size() == 0 ) {
5441 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5442 error( RtAudioError::INVALID_USE );
5447 if ( device >= dsDevices.size() ) {
5448 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5449 error( RtAudioError::INVALID_USE );
// validId[0] == output GUID valid; skip straight to the capture probe when
// this device has no playback side.
5454 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5456 LPDIRECTSOUND output;
5458 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5459 if ( FAILED( result ) ) {
5460 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5461 errorText_ = errorStream_.str();
5462 error( RtAudioError::WARNING );
5466 outCaps.dwSize = sizeof( outCaps );
5467 result = output->GetCaps( &outCaps );
5468 if ( FAILED( result ) ) {
5470 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5471 errorText_ = errorStream_.str();
5472 error( RtAudioError::WARNING );
5476 // Get output channel information.
// DirectSound reports stereo support via a primary-buffer flag only; output
// is therefore capped at 2 channels (or 1 when stereo is unsupported).
5477 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5479 // Get sample rate information.
5480 info.sampleRates.clear();
5481 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5482 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5483 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5484 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate that does not exceed 48 kHz.
5486 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5487 info.preferredSampleRate = SAMPLE_RATES[k];
5491 // Get format information.
5492 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5493 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5497 if ( getDefaultOutputDevice() == device )
5498 info.isDefaultOutput = true;
// validId[1] == capture GUID valid; if absent, this is an output-only device
// and the info collected so far is final.
5500 if ( dsDevices[ device ].validId[1] == false ) {
5501 info.name = dsDevices[ device ].name;
5508 LPDIRECTSOUNDCAPTURE input;
5509 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5510 if ( FAILED( result ) ) {
5511 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5512 errorText_ = errorStream_.str();
5513 error( RtAudioError::WARNING );
5518 inCaps.dwSize = sizeof( inCaps );
5519 result = input->GetCaps( &inCaps );
5520 if ( FAILED( result ) ) {
5522 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5523 errorText_ = errorStream_.str();
5524 error( RtAudioError::WARNING );
5528 // Get input channel information.
5529 info.inputChannels = inCaps.dwChannels;
5531 // Get sample rate and format information.
// Capture capabilities arrive as WAVE_FORMAT_* bit flags; decode them into
// native formats first, then into the rate list for the chosen bit depth
// (16-bit preferred over 8-bit).
5532 std::vector<unsigned int> rates;
5533 if ( inCaps.dwChannels >= 2 ) {
5534 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5535 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5536 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5537 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5538 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5539 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5540 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5541 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5543 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5544 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5545 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5546 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5547 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5549 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5550 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5551 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5552 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5553 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5556 else if ( inCaps.dwChannels == 1 ) {
5557 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5558 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5559 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5560 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5561 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5562 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5563 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5564 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5566 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5567 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5568 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5569 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5570 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5572 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5573 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5574 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5575 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5576 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5579 else info.inputChannels = 0; // technically, this would be an error
5583 if ( info.inputChannels == 0 ) return info;
5585 // Copy the supported rates to the info structure but avoid duplication.
5587 for ( unsigned int i=0; i<rates.size(); i++ ) {
5589 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5590 if ( rates[i] == info.sampleRates[j] ) {
5595 if ( found == false ) info.sampleRates.push_back( rates[i] );
5597 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5599 // If device opens for both playback and capture, we determine the channels.
5600 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5601 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// The first capture device enumerated is always the DirectSound default.
5603 if ( device == 0 ) info.isDefaultInput = true;
5605 // Copy name and return.
5606 info.name = dsDevices[ device ].name;
5611 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5612 unsigned int firstChannel, unsigned int sampleRate,
5613 RtAudioFormat format, unsigned int *bufferSize,
5614 RtAudio::StreamOptions *options )
5616 if ( channels + firstChannel > 2 ) {
5617 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5621 size_t nDevices = dsDevices.size();
5622 if ( nDevices == 0 ) {
5623 // This should not happen because a check is made before this function is called.
5624 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5628 if ( device >= nDevices ) {
5629 // This should not happen because a check is made before this function is called.
5630 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5634 if ( mode == OUTPUT ) {
5635 if ( dsDevices[ device ].validId[0] == false ) {
5636 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5637 errorText_ = errorStream_.str();
5641 else { // mode == INPUT
5642 if ( dsDevices[ device ].validId[1] == false ) {
5643 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5644 errorText_ = errorStream_.str();
5649 // According to a note in PortAudio, using GetDesktopWindow()
5650 // instead of GetForegroundWindow() is supposed to avoid problems
5651 // that occur when the application's window is not the foreground
5652 // window. Also, if the application window closes before the
5653 // DirectSound buffer, DirectSound can crash. In the past, I had
5654 // problems when using GetDesktopWindow() but it seems fine now
5655 // (January 2010). I'll leave it commented here.
5656 // HWND hWnd = GetForegroundWindow();
5657 HWND hWnd = GetDesktopWindow();
5659 // Check the numberOfBuffers parameter and limit the lowest value to
5660 // two. This is a judgement call and a value of two is probably too
5661 // low for capture, but it should work for playback.
5663 if ( options ) nBuffers = options->numberOfBuffers;
5664 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5665 if ( nBuffers < 2 ) nBuffers = 3;
5667 // Check the lower range of the user-specified buffer size and set
5668 // (arbitrarily) to a lower bound of 32.
5669 if ( *bufferSize < 32 ) *bufferSize = 32;
5671 // Create the wave format structure. The data format setting will
5672 // be determined later.
5673 WAVEFORMATEX waveFormat;
5674 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5675 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5676 waveFormat.nChannels = channels + firstChannel;
5677 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5679 // Determine the device buffer size. By default, we'll use the value
5680 // defined above (32K), but we will grow it to make allowances for
5681 // very large software buffer sizes.
5682 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5683 DWORD dsPointerLeadTime = 0;
5685 void *ohandle = 0, *bhandle = 0;
5687 if ( mode == OUTPUT ) {
5689 LPDIRECTSOUND output;
5690 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5691 if ( FAILED( result ) ) {
5692 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5693 errorText_ = errorStream_.str();
5698 outCaps.dwSize = sizeof( outCaps );
5699 result = output->GetCaps( &outCaps );
5700 if ( FAILED( result ) ) {
5702 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5703 errorText_ = errorStream_.str();
5707 // Check channel information.
5708 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5709 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5710 errorText_ = errorStream_.str();
5714 // Check format information. Use 16-bit format unless not
5715 // supported or user requests 8-bit.
5716 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5717 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5718 waveFormat.wBitsPerSample = 16;
5719 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5722 waveFormat.wBitsPerSample = 8;
5723 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5725 stream_.userFormat = format;
5727 // Update wave format structure and buffer information.
5728 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5729 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5730 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5732 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5733 while ( dsPointerLeadTime * 2U > dsBufferSize )
5736 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5737 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5738 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5739 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5740 if ( FAILED( result ) ) {
5742 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5743 errorText_ = errorStream_.str();
5747 // Even though we will write to the secondary buffer, we need to
5748 // access the primary buffer to set the correct output format
5749 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5750 // buffer description.
5751 DSBUFFERDESC bufferDescription;
5752 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5753 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5754 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5756 // Obtain the primary buffer
5757 LPDIRECTSOUNDBUFFER buffer;
5758 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5759 if ( FAILED( result ) ) {
5761 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5762 errorText_ = errorStream_.str();
5766 // Set the primary DS buffer sound format.
5767 result = buffer->SetFormat( &waveFormat );
5768 if ( FAILED( result ) ) {
5770 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5771 errorText_ = errorStream_.str();
5775 // Setup the secondary DS buffer description.
5776 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5777 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5778 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5779 DSBCAPS_GLOBALFOCUS |
5780 DSBCAPS_GETCURRENTPOSITION2 |
5781 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5782 bufferDescription.dwBufferBytes = dsBufferSize;
5783 bufferDescription.lpwfxFormat = &waveFormat;
5785 // Try to create the secondary DS buffer. If that doesn't work,
5786 // try to use software mixing. Otherwise, there's a problem.
5787 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5788 if ( FAILED( result ) ) {
5789 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5790 DSBCAPS_GLOBALFOCUS |
5791 DSBCAPS_GETCURRENTPOSITION2 |
5792 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5793 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5794 if ( FAILED( result ) ) {
5796 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5797 errorText_ = errorStream_.str();
5802 // Get the buffer size ... might be different from what we specified.
5804 dsbcaps.dwSize = sizeof( DSBCAPS );
5805 result = buffer->GetCaps( &dsbcaps );
5806 if ( FAILED( result ) ) {
5809 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5810 errorText_ = errorStream_.str();
5814 dsBufferSize = dsbcaps.dwBufferBytes;
5816 // Lock the DS buffer
5819 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5820 if ( FAILED( result ) ) {
5823 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5824 errorText_ = errorStream_.str();
5828 // Zero the DS buffer
5829 ZeroMemory( audioPtr, dataLen );
5831 // Unlock the DS buffer
5832 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5833 if ( FAILED( result ) ) {
5836 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5837 errorText_ = errorStream_.str();
5841 ohandle = (void *) output;
5842 bhandle = (void *) buffer;
5845 if ( mode == INPUT ) {
5847 LPDIRECTSOUNDCAPTURE input;
5848 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5849 if ( FAILED( result ) ) {
5850 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5851 errorText_ = errorStream_.str();
5856 inCaps.dwSize = sizeof( inCaps );
5857 result = input->GetCaps( &inCaps );
5858 if ( FAILED( result ) ) {
5860 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5861 errorText_ = errorStream_.str();
5865 // Check channel information.
5866 if ( inCaps.dwChannels < channels + firstChannel ) {
5867 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5871 // Check format information. Use 16-bit format unless user
5873 DWORD deviceFormats;
5874 if ( channels + firstChannel == 2 ) {
5875 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5876 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5877 waveFormat.wBitsPerSample = 8;
5878 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5880 else { // assume 16-bit is supported
5881 waveFormat.wBitsPerSample = 16;
5882 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5885 else { // channel == 1
5886 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5887 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5888 waveFormat.wBitsPerSample = 8;
5889 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5891 else { // assume 16-bit is supported
5892 waveFormat.wBitsPerSample = 16;
5893 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5896 stream_.userFormat = format;
5898 // Update wave format structure and buffer information.
5899 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5900 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5901 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5903 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5904 while ( dsPointerLeadTime * 2U > dsBufferSize )
5907 // Setup the secondary DS buffer description.
5908 DSCBUFFERDESC bufferDescription;
5909 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5910 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5911 bufferDescription.dwFlags = 0;
5912 bufferDescription.dwReserved = 0;
5913 bufferDescription.dwBufferBytes = dsBufferSize;
5914 bufferDescription.lpwfxFormat = &waveFormat;
5916 // Create the capture buffer.
5917 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5918 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5919 if ( FAILED( result ) ) {
5921 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5922 errorText_ = errorStream_.str();
5926 // Get the buffer size ... might be different from what we specified.
5928 dscbcaps.dwSize = sizeof( DSCBCAPS );
5929 result = buffer->GetCaps( &dscbcaps );
5930 if ( FAILED( result ) ) {
5933 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5934 errorText_ = errorStream_.str();
5938 dsBufferSize = dscbcaps.dwBufferBytes;
5940 // NOTE: We could have a problem here if this is a duplex stream
5941 // and the play and capture hardware buffer sizes are different
5942 // (I'm actually not sure if that is a problem or not).
5943 // Currently, we are not verifying that.
5945 // Lock the capture buffer
5948 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5949 if ( FAILED( result ) ) {
5952 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5953 errorText_ = errorStream_.str();
5958 ZeroMemory( audioPtr, dataLen );
5960 // Unlock the buffer
5961 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5962 if ( FAILED( result ) ) {
5965 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5966 errorText_ = errorStream_.str();
5970 ohandle = (void *) input;
5971 bhandle = (void *) buffer;
5974 // Set various stream parameters
5975 DsHandle *handle = 0;
5976 stream_.nDeviceChannels[mode] = channels + firstChannel;
5977 stream_.nUserChannels[mode] = channels;
5978 stream_.bufferSize = *bufferSize;
5979 stream_.channelOffset[mode] = firstChannel;
5980 stream_.deviceInterleaved[mode] = true;
5981 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5982 else stream_.userInterleaved = true;
5984 // Set flag for buffer conversion
5985 stream_.doConvertBuffer[mode] = false;
5986 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5987 stream_.doConvertBuffer[mode] = true;
5988 if (stream_.userFormat != stream_.deviceFormat[mode])
5989 stream_.doConvertBuffer[mode] = true;
5990 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5991 stream_.nUserChannels[mode] > 1 )
5992 stream_.doConvertBuffer[mode] = true;
5994 // Allocate necessary internal buffers
5995 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5996 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5997 if ( stream_.userBuffer[mode] == NULL ) {
5998 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6002 if ( stream_.doConvertBuffer[mode] ) {
6004 bool makeBuffer = true;
6005 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6006 if ( mode == INPUT ) {
6007 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6008 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6009 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6014 bufferBytes *= *bufferSize;
6015 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6016 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6017 if ( stream_.deviceBuffer == NULL ) {
6018 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6024 // Allocate our DsHandle structures for the stream.
6025 if ( stream_.apiHandle == 0 ) {
6027 handle = new DsHandle;
6029 catch ( std::bad_alloc& ) {
6030 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6034 // Create a manual-reset event.
6035 handle->condition = CreateEvent( NULL, // no security
6036 TRUE, // manual-reset
6037 FALSE, // non-signaled initially
6039 stream_.apiHandle = (void *) handle;
6042 handle = (DsHandle *) stream_.apiHandle;
6043 handle->id[mode] = ohandle;
6044 handle->buffer[mode] = bhandle;
6045 handle->dsBufferSize[mode] = dsBufferSize;
6046 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6048 stream_.device[mode] = device;
6049 stream_.state = STREAM_STOPPED;
6050 if ( stream_.mode == OUTPUT && mode == INPUT )
6051 // We had already set up an output stream.
6052 stream_.mode = DUPLEX;
6054 stream_.mode = mode;
6055 stream_.nBuffers = nBuffers;
6056 stream_.sampleRate = sampleRate;
6058 // Setup the buffer conversion information structure.
6059 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6061 // Setup the callback thread.
6062 if ( stream_.callbackInfo.isRunning == false ) {
6064 stream_.callbackInfo.isRunning = true;
6065 stream_.callbackInfo.object = (void *) this;
6066 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6067 &stream_.callbackInfo, 0, &threadId );
6068 if ( stream_.callbackInfo.thread == 0 ) {
6069 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6073 // Boost DS thread priority
6074 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6080 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6081 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6082 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6083 if ( buffer ) buffer->Release();
6086 if ( handle->buffer[1] ) {
6087 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6088 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6089 if ( buffer ) buffer->Release();
6092 CloseHandle( handle->condition );
6094 stream_.apiHandle = 0;
6097 for ( int i=0; i<2; i++ ) {
6098 if ( stream_.userBuffer[i] ) {
6099 free( stream_.userBuffer[i] );
6100 stream_.userBuffer[i] = 0;
6104 if ( stream_.deviceBuffer ) {
6105 free( stream_.deviceBuffer );
6106 stream_.deviceBuffer = 0;
6109 stream_.state = STREAM_CLOSED;
6113 void RtApiDs :: closeStream()
// Close an open DirectSound stream: stop and join the callback thread,
// release the playback/capture DirectSound objects held in the DsHandle,
// free the internal conversion buffers, and mark the stream CLOSED.
// NOTE(review): several original source lines are elided in this view
// (e.g. the opening brace and the buffer Stop()/Release() calls between
// the casts below); comments describe only the visible statements.
6115   if ( stream_.state == STREAM_CLOSED ) {
6116     errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6117     error( RtAudioError::WARNING );
// Closing an already-closed stream is a warning, not a fatal error.
6121   // Stop the callback thread.
// Clearing isRunning lets the callback loop exit on its own; then wait
// for the thread to terminate and release its Win32 handle.
6122   stream_.callbackInfo.isRunning = false;
6123   WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6124   CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6126   DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Output side: buffer[0] is the playback buffer, id[0] the LPDIRECTSOUND
// device object.
6128     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6129       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6130       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Input side: buffer[1] is the capture buffer, id[1] the
// LPDIRECTSOUNDCAPTURE device object.
6137     if ( handle->buffer[1] ) {
6138       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6139       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the manual-reset condition event and clear the api handle
// pointer (the DsHandle deletion itself is on an elided line).
6146     CloseHandle( handle->condition );
6148     stream_.apiHandle = 0;
// Free the per-mode user buffers (index 0 = output, 1 = input) ...
6151   for ( int i=0; i<2; i++ ) {
6152     if ( stream_.userBuffer[i] ) {
6153       free( stream_.userBuffer[i] );
6154       stream_.userBuffer[i] = 0;
// ... and the shared device-format conversion buffer, if allocated.
6158   if ( stream_.deviceBuffer ) {
6159     free( stream_.deviceBuffer );
6160     stream_.deviceBuffer = 0;
// Reset the stream object to its fully-closed state.
6163   stream_.mode = UNINITIALIZED;
6164   stream_.state = STREAM_CLOSED;
6167 void RtApiDs :: startStream()
// Start audio flow on an opened stream: begin looping playback on the
// output DS buffer and/or looping capture on the input DS buffer, then
// mark the stream RUNNING.
// NOTE(review): some original lines (opening brace, prologue, error
// exits) are elided in this view; comments describe only the visible
// statements.
6170   if ( stream_.state == STREAM_RUNNING ) {
6171     errorText_ = "RtApiDs::startStream(): the stream is already running!";
6172     error( RtAudioError::WARNING );
// Starting an already-running stream is only a warning.
6176   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6178   // Increase scheduler frequency on lesser windows (a side-effect of
6179   // increasing timer accuracy). On greater windows (Win2K or later),
6180   // this is already in effect.
6181   timeBeginPeriod( 1 );
// Reset the duplex-synchronization bookkeeping consumed by
// callbackEvent().
6183   buffersRolling = false;
6184   duplexPrerollBytes = 0;
6186   if ( stream_.mode == DUPLEX ) {
6187     // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6188     duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off looping playback on the output buffer.
6192   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6194     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6195     result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6196     if ( FAILED( result ) ) {
6197       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6198       errorText_ = errorStream_.str();
// Kick off looping capture on the input buffer.
6203   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6205     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6206     result = buffer->Start( DSCBSTART_LOOPING );
6207     if ( FAILED( result ) ) {
6208       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6209       errorText_ = errorStream_.str();
// Fresh run: nothing queued to drain, and the condition event starts
// non-signaled so stopStream() can block on it later.
6214   handle->drainCounter = 0;
6215   handle->internalDrain = false;
6216   ResetEvent( handle->condition );
6217   stream_.state = STREAM_RUNNING;
// Surface any DirectSound failure recorded above.
6220   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6223 void RtApiDs :: stopStream()
// Stop a running stream gracefully: let queued output drain, stop the
// DS playback/capture buffers, zero their contents so a restart does
// not replay stale audio, and reset the buffer pointers.
// NOTE(review): some original lines (opening brace, declarations of
// result/audioPtr/dataLen, error exits) are elided in this view;
// comments describe only the visible statements.
6226   if ( stream_.state == STREAM_STOPPED ) {
6227     errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6228     error( RtAudioError::WARNING );
6235   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6236   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Drain: drainCounter == 2 makes callbackEvent() write zeros instead of
// calling the user callback; block on the condition event until the
// drain completes and the callback signals it.
6237     if ( handle->drainCounter == 0 ) {
6238       handle->drainCounter = 2;
6239       WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6242     stream_.state = STREAM_STOPPED;
6244     MUTEX_LOCK( &stream_.mutex );
6246     // Stop the buffer and clear memory
6247     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6248     result = buffer->Stop();
6249     if ( FAILED( result ) ) {
6250       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6251       errorText_ = errorStream_.str();
6255     // Lock the buffer and clear it so that if we start to play again,
6256     // we won't have old data playing.
6257     result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6258     if ( FAILED( result ) ) {
6259       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6260       errorText_ = errorStream_.str();
6264     // Zero the DS buffer
6265     ZeroMemory( audioPtr, dataLen );
6267     // Unlock the DS buffer
6268     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6269     if ( FAILED( result ) ) {
6270       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6271       errorText_ = errorStream_.str();
6275     // If we start playing again, we must begin at beginning of buffer.
6276     handle->bufferPointer[0] = 0;
6279   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6280     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6284     stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch
// above; only lock here for INPUT-only streams.
6286     if ( stream_.mode != DUPLEX )
6287       MUTEX_LOCK( &stream_.mutex );
6289     result = buffer->Stop();
6290     if ( FAILED( result ) ) {
6291       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6292       errorText_ = errorStream_.str();
6296     // Lock the buffer and clear it so that if we start to play again,
6297     // we won't have old data playing.
6298     result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6299     if ( FAILED( result ) ) {
6300       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6301       errorText_ = errorStream_.str();
6305     // Zero the DS buffer
6306     ZeroMemory( audioPtr, dataLen );
6308     // Unlock the DS buffer
6309     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6310     if ( FAILED( result ) ) {
6311       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6312       errorText_ = errorStream_.str();
6316     // If we start recording again, we must begin at beginning of buffer.
6317     handle->bufferPointer[1] = 0;
6321   timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6322   MUTEX_UNLOCK( &stream_.mutex );
// Surface any DirectSound failure recorded above.
6324   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6327 void RtApiDs :: abortStream()
// Stop the stream immediately, discarding audio still queued in the
// output buffer (contrast stopStream(), which blocks to drain it).
// NOTE(review): the tail of this function is elided in this view —
// presumably it proceeds to the common stop path; confirm against the
// full source.
6330   if ( stream_.state == STREAM_STOPPED ) {
6331     errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6332     error( RtAudioError::WARNING );
// drainCounter == 2 makes callbackEvent() write zeros to the output
// stream rather than invoking the user callback (see the drainCounter
// checks in callbackEvent).
6336   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6337   handle->drainCounter = 2;
6342 void RtApiDs :: callbackEvent()
6344 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6345 Sleep( 50 ); // sleep 50 milliseconds
6349 if ( stream_.state == STREAM_CLOSED ) {
6350 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6351 error( RtAudioError::WARNING );
6355 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6356 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6358 // Check if we were draining the stream and signal is finished.
6359 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6361 stream_.state = STREAM_STOPPING;
6362 if ( handle->internalDrain == false )
6363 SetEvent( handle->condition );
6369 // Invoke user callback to get fresh output data UNLESS we are
6371 if ( handle->drainCounter == 0 ) {
6372 RtAudioCallback callback = (RtAudioCallback) info->callback;
6373 double streamTime = getStreamTime();
6374 RtAudioStreamStatus status = 0;
6375 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6376 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6377 handle->xrun[0] = false;
6379 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6380 status |= RTAUDIO_INPUT_OVERFLOW;
6381 handle->xrun[1] = false;
6383 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6384 stream_.bufferSize, streamTime, status, info->userData );
6385 if ( cbReturnValue == 2 ) {
6386 stream_.state = STREAM_STOPPING;
6387 handle->drainCounter = 2;
6391 else if ( cbReturnValue == 1 ) {
6392 handle->drainCounter = 1;
6393 handle->internalDrain = true;
6398 DWORD currentWritePointer, safeWritePointer;
6399 DWORD currentReadPointer, safeReadPointer;
6400 UINT nextWritePointer;
6402 LPVOID buffer1 = NULL;
6403 LPVOID buffer2 = NULL;
6404 DWORD bufferSize1 = 0;
6405 DWORD bufferSize2 = 0;
6410 MUTEX_LOCK( &stream_.mutex );
6411 if ( stream_.state == STREAM_STOPPED ) {
6412 MUTEX_UNLOCK( &stream_.mutex );
6416 if ( buffersRolling == false ) {
6417 if ( stream_.mode == DUPLEX ) {
6418 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6420 // It takes a while for the devices to get rolling. As a result,
6421 // there's no guarantee that the capture and write device pointers
6422 // will move in lockstep. Wait here for both devices to start
6423 // rolling, and then set our buffer pointers accordingly.
6424 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6425 // bytes later than the write buffer.
6427 // Stub: a serious risk of having a pre-emptive scheduling round
6428 // take place between the two GetCurrentPosition calls... but I'm
6429 // really not sure how to solve the problem. Temporarily boost to
6430 // Realtime priority, maybe; but I'm not sure what priority the
6431 // DirectSound service threads run at. We *should* be roughly
6432 // within a ms or so of correct.
6434 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6435 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6437 DWORD startSafeWritePointer, startSafeReadPointer;
6439 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6440 if ( FAILED( result ) ) {
6441 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6442 errorText_ = errorStream_.str();
6443 MUTEX_UNLOCK( &stream_.mutex );
6444 error( RtAudioError::SYSTEM_ERROR );
6447 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6448 if ( FAILED( result ) ) {
6449 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6450 errorText_ = errorStream_.str();
6451 MUTEX_UNLOCK( &stream_.mutex );
6452 error( RtAudioError::SYSTEM_ERROR );
6456 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6457 if ( FAILED( result ) ) {
6458 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6459 errorText_ = errorStream_.str();
6460 MUTEX_UNLOCK( &stream_.mutex );
6461 error( RtAudioError::SYSTEM_ERROR );
6464 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6465 if ( FAILED( result ) ) {
6466 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6467 errorText_ = errorStream_.str();
6468 MUTEX_UNLOCK( &stream_.mutex );
6469 error( RtAudioError::SYSTEM_ERROR );
6472 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6476 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6478 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6479 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6480 handle->bufferPointer[1] = safeReadPointer;
6482 else if ( stream_.mode == OUTPUT ) {
6484 // Set the proper nextWritePosition after initial startup.
6485 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6486 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6487 if ( FAILED( result ) ) {
6488 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6489 errorText_ = errorStream_.str();
6490 MUTEX_UNLOCK( &stream_.mutex );
6491 error( RtAudioError::SYSTEM_ERROR );
6494 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6495 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6498 buffersRolling = true;
6501 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6503 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6505 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6506 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6507 bufferBytes *= formatBytes( stream_.userFormat );
6508 memset( stream_.userBuffer[0], 0, bufferBytes );
6511 // Setup parameters and do buffer conversion if necessary.
6512 if ( stream_.doConvertBuffer[0] ) {
6513 buffer = stream_.deviceBuffer;
6514 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6515 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6516 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6519 buffer = stream_.userBuffer[0];
6520 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6521 bufferBytes *= formatBytes( stream_.userFormat );
6524 // No byte swapping necessary in DirectSound implementation.
6526 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6527 // unsigned. So, we need to convert our signed 8-bit data here to
6529 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6530 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6532 DWORD dsBufferSize = handle->dsBufferSize[0];
6533 nextWritePointer = handle->bufferPointer[0];
6535 DWORD endWrite, leadPointer;
6537 // Find out where the read and "safe write" pointers are.
6538 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6539 if ( FAILED( result ) ) {
6540 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6541 errorText_ = errorStream_.str();
6542 MUTEX_UNLOCK( &stream_.mutex );
6543 error( RtAudioError::SYSTEM_ERROR );
6547 // We will copy our output buffer into the region between
6548 // safeWritePointer and leadPointer. If leadPointer is not
6549 // beyond the next endWrite position, wait until it is.
6550 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6551 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6552 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6553 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6554 endWrite = nextWritePointer + bufferBytes;
6556 // Check whether the entire write region is behind the play pointer.
6557 if ( leadPointer >= endWrite ) break;
6559 // If we are here, then we must wait until the leadPointer advances
6560 // beyond the end of our next write region. We use the
6561 // Sleep() function to suspend operation until that happens.
6562 double millis = ( endWrite - leadPointer ) * 1000.0;
6563 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6564 if ( millis < 1.0 ) millis = 1.0;
6565 Sleep( (DWORD) millis );
6568 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6569 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6570 // We've strayed into the forbidden zone ... resync the read pointer.
6571 handle->xrun[0] = true;
6572 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6573 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6574 handle->bufferPointer[0] = nextWritePointer;
6575 endWrite = nextWritePointer + bufferBytes;
6578 // Lock free space in the buffer
6579 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6580 &bufferSize1, &buffer2, &bufferSize2, 0 );
6581 if ( FAILED( result ) ) {
6582 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6583 errorText_ = errorStream_.str();
6584 MUTEX_UNLOCK( &stream_.mutex );
6585 error( RtAudioError::SYSTEM_ERROR );
6589 // Copy our buffer into the DS buffer
6590 CopyMemory( buffer1, buffer, bufferSize1 );
6591 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6593 // Update our buffer offset and unlock sound buffer
6594 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6595 if ( FAILED( result ) ) {
6596 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6597 errorText_ = errorStream_.str();
6598 MUTEX_UNLOCK( &stream_.mutex );
6599 error( RtAudioError::SYSTEM_ERROR );
6602 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6603 handle->bufferPointer[0] = nextWritePointer;
6606 // Don't bother draining input
6607 if ( handle->drainCounter ) {
6608 handle->drainCounter++;
6612 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6614 // Setup parameters.
6615 if ( stream_.doConvertBuffer[1] ) {
6616 buffer = stream_.deviceBuffer;
6617 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6618 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6621 buffer = stream_.userBuffer[1];
6622 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6623 bufferBytes *= formatBytes( stream_.userFormat );
6626 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6627 long nextReadPointer = handle->bufferPointer[1];
6628 DWORD dsBufferSize = handle->dsBufferSize[1];
6630 // Find out where the write and "safe read" pointers are.
6631 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6632 if ( FAILED( result ) ) {
6633 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6634 errorText_ = errorStream_.str();
6635 MUTEX_UNLOCK( &stream_.mutex );
6636 error( RtAudioError::SYSTEM_ERROR );
6640 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6641 DWORD endRead = nextReadPointer + bufferBytes;
6643 // Handling depends on whether we are INPUT or DUPLEX.
6644 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6645 // then a wait here will drag the write pointers into the forbidden zone.
6647 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6648 // it's in a safe position. This causes dropouts, but it seems to be the only
6649 // practical way to sync up the read and write pointers reliably, given the
6650 // the very complex relationship between phase and increment of the read and write
6653 // In order to minimize audible dropouts in DUPLEX mode, we will
6654 // provide a pre-roll period of 0.5 seconds in which we return
6655 // zeros from the read buffer while the pointers sync up.
6657 if ( stream_.mode == DUPLEX ) {
6658 if ( safeReadPointer < endRead ) {
6659 if ( duplexPrerollBytes <= 0 ) {
6660 // Pre-roll time over. Be more aggressive.
6661 int adjustment = endRead-safeReadPointer;
6663 handle->xrun[1] = true;
6665 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6666 // and perform fine adjustments later.
6667 // - small adjustments: back off by twice as much.
6668 if ( adjustment >= 2*bufferBytes )
6669 nextReadPointer = safeReadPointer-2*bufferBytes;
6671 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6673 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6677 // In pre-roll time. Just do it.
6678 nextReadPointer = safeReadPointer - bufferBytes;
6679 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6681 endRead = nextReadPointer + bufferBytes;
6684 else { // mode == INPUT
6685 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6686 // See comments for playback.
6687 double millis = (endRead - safeReadPointer) * 1000.0;
6688 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6689 if ( millis < 1.0 ) millis = 1.0;
6690 Sleep( (DWORD) millis );
6692 // Wake up and find out where we are now.
6693 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6694 if ( FAILED( result ) ) {
6695 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6696 errorText_ = errorStream_.str();
6697 MUTEX_UNLOCK( &stream_.mutex );
6698 error( RtAudioError::SYSTEM_ERROR );
6702 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6706 // Lock free space in the buffer
6707 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6708 &bufferSize1, &buffer2, &bufferSize2, 0 );
6709 if ( FAILED( result ) ) {
6710 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6711 errorText_ = errorStream_.str();
6712 MUTEX_UNLOCK( &stream_.mutex );
6713 error( RtAudioError::SYSTEM_ERROR );
6717 if ( duplexPrerollBytes <= 0 ) {
6718 // Copy our buffer into the DS buffer
6719 CopyMemory( buffer, buffer1, bufferSize1 );
6720 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6723 memset( buffer, 0, bufferSize1 );
6724 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6725 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6728 // Update our buffer offset and unlock sound buffer
6729 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6730 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6731 if ( FAILED( result ) ) {
6732 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6733 errorText_ = errorStream_.str();
6734 MUTEX_UNLOCK( &stream_.mutex );
6735 error( RtAudioError::SYSTEM_ERROR );
6738 handle->bufferPointer[1] = nextReadPointer;
6740 // No byte swapping necessary in DirectSound implementation.
6742 // If necessary, convert 8-bit data from unsigned to signed.
6743 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6744 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6746 // Do buffer conversion if necessary.
6747 if ( stream_.doConvertBuffer[1] )
6748 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6752 MUTEX_UNLOCK( &stream_.mutex );
6753 RtApi::tickStreamTime();
6756 // Definitions for utility functions and callbacks
6757 // specific to the DirectSound implementation.
// Thread entry point for the DirectSound callback loop.  The CallbackInfo
// passed in `ptr` carries both the owning RtApiDs instance and the
// isRunning flag; the loop simply pumps callbackEvent() until the stream
// owner clears isRunning.  (Trailing lines of this function, including the
// thread-exit call, are not visible in this extract.)
6759 static unsigned __stdcall callbackHandler( void *ptr )
6761 CallbackInfo *info = (CallbackInfo *) ptr;
6762 RtApiDs *object = (RtApiDs *) info->object;
// Cache a pointer to the flag so each iteration re-reads the live value.
6763 bool* isRunning = &info->isRunning;
6765 while ( *isRunning == true ) {
6766 object->callbackEvent();
// DirectSound enumeration callback (used with DirectSoundEnumerate /
// DirectSoundCaptureEnumerate).  For each enumerated device it verifies the
// device can actually be opened and reports usable capabilities, then records
// the device name and GUID in the shared dsDevices vector carried in
// lpContext.  Returning TRUE continues enumeration.  (Several interior lines
// — else-branches and closing braces — are missing from this extract.)
6773 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6774 LPCTSTR description,
6778 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6779 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6782 bool validDevice = false;
// Capture-side probe: open the device and check it reports at least one
// channel and one supported format.
6783 if ( probeInfo.isInput == true ) {
6785 LPDIRECTSOUNDCAPTURE object;
6787 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
// An unopenable device is skipped, but enumeration continues.
6788 if ( hr != DS_OK ) return TRUE;
6790 caps.dwSize = sizeof(caps);
6791 hr = object->GetCaps( &caps );
6792 if ( hr == DS_OK ) {
6793 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback-side probe: require a primary mono or stereo capability flag.
6800 LPDIRECTSOUND object;
6801 hr = DirectSoundCreate( lpguid, &object, NULL );
6802 if ( hr != DS_OK ) return TRUE;
6804 caps.dwSize = sizeof(caps);
6805 hr = object->GetCaps( &caps );
6806 if ( hr == DS_OK ) {
6807 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6813 // If good device, then save its name and guid.
6814 std::string name = convertCharPointerToStdString( description );
6815 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID is DirectSound's convention for the system default device.
6816 if ( lpguid == NULL )
6817 name = "Default Device";
6818 if ( validDevice ) {
// If a device of this name was already recorded (e.g. from the opposite
// direction's enumeration), just fill in the id/validId slot for this
// direction: index 1 = capture, index 0 = playback.
6819 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6820 if ( dsDevices[i].name == name ) {
6821 dsDevices[i].found = true;
6822 if ( probeInfo.isInput ) {
6823 dsDevices[i].id[1] = lpguid;
6824 dsDevices[i].validId[1] = true;
6827 dsDevices[i].id[0] = lpguid;
6828 dsDevices[i].validId[0] = true;
// Otherwise append a new DsDevice entry for this name/GUID.
6836 device.found = true;
6837 if ( probeInfo.isInput ) {
6838 device.id[1] = lpguid;
6839 device.validId[1] = true;
6842 device.id[0] = lpguid;
6843 device.validId[0] = true;
6845 dsDevices.push_back( device );
// Map a DirectSound HRESULT error code to a short human-readable string
// for use in RtApiDs error messages.  Unrecognized codes fall through to
// the generic "DirectSound unknown error" string.  (The switch scaffolding
// — braces, `switch ( code )`, and the default label — is missing from
// this extract.)
6851 static const char* getErrorString( int code )
6855 case DSERR_ALLOCATED:
6856 return "Already allocated";
6858 case DSERR_CONTROLUNAVAIL:
6859 return "Control unavailable";
6861 case DSERR_INVALIDPARAM:
6862 return "Invalid parameter";
6864 case DSERR_INVALIDCALL:
6865 return "Invalid call";
6868 return "Generic error";
6870 case DSERR_PRIOLEVELNEEDED:
6871 return "Priority level needed";
6873 case DSERR_OUTOFMEMORY:
6874 return "Out of memory";
6876 case DSERR_BADFORMAT:
6877 return "The sample rate or the channel format is not supported";
6879 case DSERR_UNSUPPORTED:
6880 return "Not supported";
6882 case DSERR_NODRIVER:
6885 case DSERR_ALREADYINITIALIZED:
6886 return "Already initialized";
6888 case DSERR_NOAGGREGATION:
6889 return "No aggregation";
6891 case DSERR_BUFFERLOST:
6892 return "Buffer lost";
6894 case DSERR_OTHERAPPHASPRIO:
6895 return "Another application already has priority";
6897 case DSERR_UNINITIALIZED:
6898 return "Uninitialized";
6901 return "DirectSound unknown error";
6904 //******************** End of __WINDOWS_DS__ *********************//
6908 #if defined(__LINUX_ALSA__)
6910 #include <alsa/asoundlib.h>
6913 // A structure to hold various information related to the ALSA API
// Fragment of struct AlsaHandle (the `struct` line and some members are
// missing from this extract).  Visible members:
//   handles[2]   - ALSA PCM handles; index 0 = playback, index 1 = capture
//                  (matches id[0]/id[1] usage elsewhere in this file)
//   runnable_cv  - condition variable used with the stream mutex to
//                  block/wake the callback thread
6916 snd_pcm_t *handles[2];
6919 pthread_cond_t runnable_cv;
// Constructor initializer fragment: starts non-synchronized, not runnable,
// and clears both xrun (overrun/underrun) flags.
6923 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Forward declaration of the ALSA callback thread entry point, defined
// later in this file and passed to pthread_create() in probeDeviceOpen().
6926 static void *alsaCallbackHandler( void * ptr );
// Default constructor: all real setup happens lazily in probeDeviceOpen().
6928 RtApiAlsa :: RtApiAlsa()
6930 // Nothing to do here.
// Destructor: ensure any open stream is torn down (closeStream releases the
// PCM handles, buffers, and the callback thread).
6933 RtApiAlsa :: ~RtApiAlsa()
6935 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count available ALSA PCM devices by walking every sound card with
// snd_card_next() and enumerating each card's PCM subdevices via the
// control interface; also counts the "default" device if it can be opened.
// Failures on individual cards are reported as warnings and skipped rather
// than aborting the scan.  (Counter increments, braces, and the final
// return are missing from this extract.)
6938 unsigned int RtApiAlsa :: getDeviceCount( void )
6940 unsigned nDevices = 0;
6941 int result, subdevice, card;
6945 // Count cards and devices
// snd_card_next(&card) with card = -1 yields the first card; it sets
// card to -1 again when no more cards exist, ending the loop.
6947 snd_card_next( &card );
6948 while ( card >= 0 ) {
6949 sprintf( name, "hw:%d", card );
6950 result = snd_ctl_open( &handle, name, 0 );
6952 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6953 errorText_ = errorStream_.str();
6954 error( RtAudioError::WARNING );
// Enumerate PCM devices on this card; subdevice becomes -1 when exhausted.
6959 result = snd_ctl_pcm_next_device( handle, &subdevice );
6961 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6962 errorText_ = errorStream_.str();
6963 error( RtAudioError::WARNING );
6966 if ( subdevice < 0 )
6971 snd_ctl_close( handle );
6972 snd_card_next( &card );
// Finally, probe the "default" virtual device so it is counted too.
6975 result = snd_ctl_open( &handle, "default", 0 );
6978 snd_ctl_close( handle );
// Probe a single ALSA device (by the same enumeration order used in
// getDeviceCount) and fill an RtAudio::DeviceInfo with its channel counts,
// supported sample rates, native data formats, and name.  If a stream is
// already open on the queried device, previously saved probe results are
// returned instead, since an open device cannot be re-probed.
//
// FIX: line 7070 previously read "snd_pcm_hw_params_alloca( ¶ms );" —
// an HTML-entity mis-decoding of "&params" ("&para" -> U+00B6) that does
// not compile.  Restored to snd_pcm_hw_params_alloca( &params ).
6984 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6986 RtAudio::DeviceInfo info;
6987 info.probed = false;
6989 unsigned nDevices = 0;
6990 int result, subdevice, card;
6994 // Count cards and devices
// Walk cards/subdevices exactly as getDeviceCount() does, stopping when
// the running index matches the requested device so `name` identifies it.
6997 snd_card_next( &card );
6998 while ( card >= 0 ) {
6999 sprintf( name, "hw:%d", card );
7000 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7002 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7003 errorText_ = errorStream_.str();
7004 error( RtAudioError::WARNING );
7009 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7011 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7012 errorText_ = errorStream_.str();
7013 error( RtAudioError::WARNING );
7016 if ( subdevice < 0 ) break;
7017 if ( nDevices == device ) {
7018 sprintf( name, "hw:%d,%d", card, subdevice );
7024 snd_ctl_close( chandle );
7025 snd_card_next( &card );
// The "default" virtual device occupies the last enumeration slot.
7028 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7029 if ( result == 0 ) {
7030 if ( nDevices == device ) {
7031 strcpy( name, "default" );
7037 if ( nDevices == 0 ) {
7038 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7039 error( RtAudioError::INVALID_USE );
7043 if ( device >= nDevices ) {
7044 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7045 error( RtAudioError::INVALID_USE );
7051 // If a stream is already open, we cannot probe the stream devices.
7052 // Thus, use the saved results.
7053 if ( stream_.state != STREAM_CLOSED &&
7054 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7055 snd_ctl_close( chandle );
7056 if ( device >= devices_.size() ) {
7057 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7058 error( RtAudioError::WARNING );
7061 return devices_[ device ];
7064 int openMode = SND_PCM_ASYNC;
7065 snd_pcm_stream_t stream;
7066 snd_pcm_info_t *pcminfo;
7067 snd_pcm_info_alloca( &pcminfo );
7069 snd_pcm_hw_params_t *params;
7070 snd_pcm_hw_params_alloca( &params );
7072 // First try for playback unless default device (which has subdev -1)
7073 stream = SND_PCM_STREAM_PLAYBACK;
7074 snd_pcm_info_set_stream( pcminfo, stream );
7075 if ( subdevice != -1 ) {
7076 snd_pcm_info_set_device( pcminfo, subdevice );
7077 snd_pcm_info_set_subdevice( pcminfo, 0 );
7079 result = snd_ctl_pcm_info( chandle, pcminfo );
7081 // Device probably doesn't support playback.
7086 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7088 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7089 errorText_ = errorStream_.str();
7090 error( RtAudioError::WARNING );
7094 // The device is open ... fill the parameter structure.
7095 result = snd_pcm_hw_params_any( phandle, params );
7097 snd_pcm_close( phandle );
7098 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7099 errorText_ = errorStream_.str();
7100 error( RtAudioError::WARNING );
7104 // Get output channel information.
7106 result = snd_pcm_hw_params_get_channels_max( params, &value );
7108 snd_pcm_close( phandle );
7109 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7110 errorText_ = errorStream_.str();
7111 error( RtAudioError::WARNING );
7114 info.outputChannels = value;
7115 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7118 stream = SND_PCM_STREAM_CAPTURE;
7119 snd_pcm_info_set_stream( pcminfo, stream );
7121 // Now try for capture unless default device (with subdev = -1)
7122 if ( subdevice != -1 ) {
7123 result = snd_ctl_pcm_info( chandle, pcminfo );
7124 snd_ctl_close( chandle );
7126 // Device probably doesn't support capture.
7127 if ( info.outputChannels == 0 ) return info;
7128 goto probeParameters;
7132 snd_ctl_close( chandle );
7134 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7136 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7137 errorText_ = errorStream_.str();
7138 error( RtAudioError::WARNING );
7139 if ( info.outputChannels == 0 ) return info;
7140 goto probeParameters;
7143 // The device is open ... fill the parameter structure.
7144 result = snd_pcm_hw_params_any( phandle, params );
7146 snd_pcm_close( phandle );
7147 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7148 errorText_ = errorStream_.str();
7149 error( RtAudioError::WARNING );
7150 if ( info.outputChannels == 0 ) return info;
7151 goto probeParameters;
7154 result = snd_pcm_hw_params_get_channels_max( params, &value );
7156 snd_pcm_close( phandle );
7157 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7158 errorText_ = errorStream_.str();
7159 error( RtAudioError::WARNING );
7160 if ( info.outputChannels == 0 ) return info;
7161 goto probeParameters;
7163 info.inputChannels = value;
7164 snd_pcm_close( phandle );
7166 // If device opens for both playback and capture, we determine the channels.
7167 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7168 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7170 // ALSA doesn't provide default devices so we'll use the first available one.
7171 if ( device == 0 && info.outputChannels > 0 )
7172 info.isDefaultOutput = true;
7173 if ( device == 0 && info.inputChannels > 0 )
7174 info.isDefaultInput = true;
7177 // At this point, we just need to figure out the supported data
7178 // formats and sample rates. We'll proceed by opening the device in
7179 // the direction with the maximum number of channels, or playback if
7180 // they are equal. This might limit our sample rate options, but so
7183 if ( info.outputChannels >= info.inputChannels )
7184 stream = SND_PCM_STREAM_PLAYBACK;
7186 stream = SND_PCM_STREAM_CAPTURE;
7187 snd_pcm_info_set_stream( pcminfo, stream );
7189 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7191 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7192 errorText_ = errorStream_.str();
7193 error( RtAudioError::WARNING );
7197 // The device is open ... fill the parameter structure.
7198 result = snd_pcm_hw_params_any( phandle, params );
7200 snd_pcm_close( phandle );
7201 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7202 errorText_ = errorStream_.str();
7203 error( RtAudioError::WARNING );
7207 // Test our discrete set of sample rate values.
// Preferred rate: the largest supported rate that is <= 48000 Hz, falling
// back to the first supported rate otherwise.
7208 info.sampleRates.clear();
7209 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7210 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7211 info.sampleRates.push_back( SAMPLE_RATES[i] );
7213 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7214 info.preferredSampleRate = SAMPLE_RATES[i];
7217 if ( info.sampleRates.size() == 0 ) {
7218 snd_pcm_close( phandle );
7219 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7220 errorText_ = errorStream_.str();
7221 error( RtAudioError::WARNING );
7225 // Probe the supported data formats ... we don't care about endian-ness just yet
7226 snd_pcm_format_t format;
7227 info.nativeFormats = 0;
7228 format = SND_PCM_FORMAT_S8;
7229 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7230 info.nativeFormats |= RTAUDIO_SINT8;
7231 format = SND_PCM_FORMAT_S16;
7232 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7233 info.nativeFormats |= RTAUDIO_SINT16;
7234 format = SND_PCM_FORMAT_S24;
7235 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7236 info.nativeFormats |= RTAUDIO_SINT24;
7237 format = SND_PCM_FORMAT_S32;
7238 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7239 info.nativeFormats |= RTAUDIO_SINT32;
7240 format = SND_PCM_FORMAT_FLOAT;
7241 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7242 info.nativeFormats |= RTAUDIO_FLOAT32;
7243 format = SND_PCM_FORMAT_FLOAT64;
7244 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7245 info.nativeFormats |= RTAUDIO_FLOAT64;
7247 // Check that we have at least one supported format
7248 if ( info.nativeFormats == 0 ) {
7249 snd_pcm_close( phandle );
7250 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7251 errorText_ = errorStream_.str();
7252 error( RtAudioError::WARNING );
7256 // Get the device name
7258 result = snd_card_get_name( card, &cardname );
7259 if ( result >= 0 ) {
7260 sprintf( name, "hw:%s,%d", cardname, subdevice );
7265 // That's all ... close the device and return
7266 snd_pcm_close( phandle );
// Snapshot the info of every device into devices_.  Called from
// probeDeviceOpen() before a stream opens, because getDeviceInfo() cannot
// probe a device that is already open and falls back to these saved results.
7271 void RtApiAlsa :: saveDeviceInfo( void )
7275 unsigned int nDevices = getDeviceCount();
7276 devices_.resize( nDevices );
7277 for ( unsigned int i=0; i<nDevices; i++ )
7278 devices_[i] = getDeviceInfo( i );
// Open and configure one direction (OUTPUT or INPUT) of an ALSA stream:
// locate the device name, open the PCM handle, negotiate access mode,
// sample format, rate, channel count, period size/count, and software
// parameters, allocate the user/device buffers, and finally (for the first
// direction opened) spawn the callback thread.  On the second call of a
// duplex setup it links the two PCM handles.  `bufferSize` is in/out: the
// device may adjust the requested period size.  Returns SUCCESS/FAILURE
// (error-cleanup paths fall through to the shared teardown at the end).
// NOTE(review): many interior lines (braces, gotos, return statements) are
// missing from this extract; comments below describe only what is visible.
7281 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7282 unsigned int firstChannel, unsigned int sampleRate,
7283 RtAudioFormat format, unsigned int *bufferSize,
7284 RtAudio::StreamOptions *options )
7287 #if defined(__RTAUDIO_DEBUG__)
7289 snd_output_stdio_attach(&out, stderr, 0);
7292 // I'm not using the "plug" interface ... too much inconsistent behavior.
7294 unsigned nDevices = 0;
7295 int result, subdevice, card;
// User may force the ALSA "default" device regardless of the device index.
7299 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7300 snprintf(name, sizeof(name), "%s", "default");
7302 // Count cards and devices
// Same card/subdevice walk as getDeviceCount(), resolving `device` to an
// "hw:card,subdevice" name.
7304 snd_card_next( &card );
7305 while ( card >= 0 ) {
7306 sprintf( name, "hw:%d", card );
7307 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7309 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7310 errorText_ = errorStream_.str();
7315 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7316 if ( result < 0 ) break;
7317 if ( subdevice < 0 ) break;
7318 if ( nDevices == device ) {
7319 sprintf( name, "hw:%d,%d", card, subdevice );
7320 snd_ctl_close( chandle );
7325 snd_ctl_close( chandle );
7326 snd_card_next( &card );
7329 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7330 if ( result == 0 ) {
7331 if ( nDevices == device ) {
7332 strcpy( name, "default" );
7338 if ( nDevices == 0 ) {
7339 // This should not happen because a check is made before this function is called.
7340 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7344 if ( device >= nDevices ) {
7345 // This should not happen because a check is made before this function is called.
7346 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7353 // The getDeviceInfo() function will not work for a device that is
7354 // already open. Thus, we'll probe the system before opening a
7355 // stream and save the results for use by getDeviceInfo().
7356 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7357 this->saveDeviceInfo();
7359 snd_pcm_stream_t stream;
7360 if ( mode == OUTPUT )
7361 stream = SND_PCM_STREAM_PLAYBACK;
7363 stream = SND_PCM_STREAM_CAPTURE;
7366 int openMode = SND_PCM_ASYNC;
7367 result = snd_pcm_open( &phandle, name, stream, openMode );
7369 if ( mode == OUTPUT )
7370 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7372 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7373 errorText_ = errorStream_.str();
7377 // Fill the parameter structure.
7378 snd_pcm_hw_params_t *hw_params;
7379 snd_pcm_hw_params_alloca( &hw_params );
7380 result = snd_pcm_hw_params_any( phandle, hw_params );
7382 snd_pcm_close( phandle );
7383 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7384 errorText_ = errorStream_.str();
7388 #if defined(__RTAUDIO_DEBUG__)
7389 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7390 snd_pcm_hw_params_dump( hw_params, out );
7393 // Set access ... check user preference.
// Try the user's preferred interleaving first; fall back to the opposite
// access mode, recording deviceInterleaved so the buffer-conversion logic
// can reconcile the two later.
7394 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7395 stream_.userInterleaved = false;
7396 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7398 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7399 stream_.deviceInterleaved[mode] = true;
7402 stream_.deviceInterleaved[mode] = false;
7405 stream_.userInterleaved = true;
7406 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7408 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7409 stream_.deviceInterleaved[mode] = false;
7412 stream_.deviceInterleaved[mode] = true;
7416 snd_pcm_close( phandle );
7417 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7418 errorText_ = errorStream_.str();
7422 // Determine how to set the device format.
7423 stream_.userFormat = format;
7424 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7426 if ( format == RTAUDIO_SINT8 )
7427 deviceFormat = SND_PCM_FORMAT_S8;
7428 else if ( format == RTAUDIO_SINT16 )
7429 deviceFormat = SND_PCM_FORMAT_S16;
7430 else if ( format == RTAUDIO_SINT24 )
7431 deviceFormat = SND_PCM_FORMAT_S24;
7432 else if ( format == RTAUDIO_SINT32 )
7433 deviceFormat = SND_PCM_FORMAT_S32;
7434 else if ( format == RTAUDIO_FLOAT32 )
7435 deviceFormat = SND_PCM_FORMAT_FLOAT;
7436 else if ( format == RTAUDIO_FLOAT64 )
7437 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7439 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7440 stream_.deviceFormat[mode] = format;
7444 // The user requested format is not natively supported by the device.
// Fall back through formats from widest to narrowest; the conversion layer
// will translate between userFormat and the chosen deviceFormat.
7445 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7446 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7447 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7451 deviceFormat = SND_PCM_FORMAT_FLOAT;
7452 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7453 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7457 deviceFormat = SND_PCM_FORMAT_S32;
7458 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7459 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7463 deviceFormat = SND_PCM_FORMAT_S24;
7464 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7465 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7469 deviceFormat = SND_PCM_FORMAT_S16;
7470 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7471 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7475 deviceFormat = SND_PCM_FORMAT_S8;
7476 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7477 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7481 // If we get here, no supported format was found.
7482 snd_pcm_close( phandle );
7483 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7484 errorText_ = errorStream_.str();
7488 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7490 snd_pcm_close( phandle );
7491 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7492 errorText_ = errorStream_.str();
7496 // Determine whether byte-swaping is necessary.
7497 stream_.doByteSwap[mode] = false;
7498 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7499 result = snd_pcm_format_cpu_endian( deviceFormat );
7501 stream_.doByteSwap[mode] = true;
7502 else if (result < 0) {
7503 snd_pcm_close( phandle );
7504 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7505 errorText_ = errorStream_.str();
7510 // Set the sample rate.
// set_rate_near may adjust sampleRate to the closest supported value.
7511 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7513 snd_pcm_close( phandle );
7514 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7515 errorText_ = errorStream_.str();
7519 // Determine the number of channels for this device. We support a possible
7520 // minimum device channel number > than the value requested by the user.
7521 stream_.nUserChannels[mode] = channels;
7523 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7524 unsigned int deviceChannels = value;
7525 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7526 snd_pcm_close( phandle );
7527 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7528 errorText_ = errorStream_.str();
7532 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7534 snd_pcm_close( phandle );
7535 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7536 errorText_ = errorStream_.str();
7539 deviceChannels = value;
7540 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7541 stream_.nDeviceChannels[mode] = deviceChannels;
7543 // Set the device channels.
7544 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7546 snd_pcm_close( phandle );
7547 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7548 errorText_ = errorStream_.str();
7552 // Set the buffer (or period) size.
7554 snd_pcm_uframes_t periodSize = *bufferSize;
7555 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7557 snd_pcm_close( phandle );
7558 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7559 errorText_ = errorStream_.str();
// Report the (possibly adjusted) period size back to the caller.
7562 *bufferSize = periodSize;
7564 // Set the buffer number, which in ALSA is referred to as the "period".
7565 unsigned int periods = 0;
7566 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7567 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7568 if ( periods < 2 ) periods = 4; // a fairly safe default value
7569 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7571 snd_pcm_close( phandle );
7572 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7573 errorText_ = errorStream_.str();
7577 // If attempting to setup a duplex stream, the bufferSize parameter
7578 // MUST be the same in both directions!
7579 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7580 snd_pcm_close( phandle );
7581 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7582 errorText_ = errorStream_.str();
7586 stream_.bufferSize = *bufferSize;
7588 // Install the hardware configuration
7589 result = snd_pcm_hw_params( phandle, hw_params );
7591 snd_pcm_close( phandle );
7592 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7593 errorText_ = errorStream_.str();
7597 #if defined(__RTAUDIO_DEBUG__)
7598 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7599 snd_pcm_hw_params_dump( hw_params, out );
7602 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7603 snd_pcm_sw_params_t *sw_params = NULL;
7604 snd_pcm_sw_params_alloca( &sw_params );
7605 snd_pcm_sw_params_current( phandle, sw_params );
7606 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7607 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7608 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7610 // The following two settings were suggested by Theo Veenker
7611 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7612 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7614 // here are two options for a fix
7615 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7616 snd_pcm_uframes_t val;
7617 snd_pcm_sw_params_get_boundary( sw_params, &val );
7618 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7620 result = snd_pcm_sw_params( phandle, sw_params );
7622 snd_pcm_close( phandle );
7623 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7624 errorText_ = errorStream_.str();
7628 #if defined(__RTAUDIO_DEBUG__)
7629 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7630 snd_pcm_sw_params_dump( sw_params, out );
7633 // Set flags for buffer conversion
7634 stream_.doConvertBuffer[mode] = false;
7635 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7636 stream_.doConvertBuffer[mode] = true;
7637 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7638 stream_.doConvertBuffer[mode] = true;
7639 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7640 stream_.nUserChannels[mode] > 1 )
7641 stream_.doConvertBuffer[mode] = true;
7643 // Allocate the ApiHandle if necessary and then save.
7644 AlsaHandle *apiInfo = 0;
7645 if ( stream_.apiHandle == 0 ) {
7647 apiInfo = (AlsaHandle *) new AlsaHandle;
7649 catch ( std::bad_alloc& ) {
7650 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7654 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7655 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7659 stream_.apiHandle = (void *) apiInfo;
7660 apiInfo->handles[0] = 0;
7661 apiInfo->handles[1] = 0;
7664 apiInfo = (AlsaHandle *) stream_.apiHandle;
7666 apiInfo->handles[mode] = phandle;
7669 // Allocate necessary internal buffers.
7670 unsigned long bufferBytes;
7671 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7672 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7673 if ( stream_.userBuffer[mode] == NULL ) {
7674 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7678 if ( stream_.doConvertBuffer[mode] ) {
7680 bool makeBuffer = true;
7681 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7682 if ( mode == INPUT ) {
// In duplex mode the (larger) existing output device buffer can be reused.
7683 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7684 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7685 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7690 bufferBytes *= *bufferSize;
7691 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7692 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7693 if ( stream_.deviceBuffer == NULL ) {
7694 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7700 stream_.sampleRate = sampleRate;
7701 stream_.nBuffers = periods;
7702 stream_.device[mode] = device;
7703 stream_.state = STREAM_STOPPED;
7705 // Setup the buffer conversion information structure.
7706 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7708 // Setup thread if necessary.
7709 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7710 // We had already set up an output stream.
7711 stream_.mode = DUPLEX;
7712 // Link the streams if possible.
7713 apiInfo->synchronized = false;
7714 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7715 apiInfo->synchronized = true;
7717 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7718 error( RtAudioError::WARNING );
7722 stream_.mode = mode;
7724 // Setup callback thread.
7725 stream_.callbackInfo.object = (void *) this;
7727 // Set the thread attributes for joinable and realtime scheduling
7728 // priority (optional). The higher priority will only take affect
7729 // if the program is run as root or suid. Note, under Linux
7730 // processes with CAP_SYS_NICE privilege, a user can change
7731 // scheduling policy and priority (thus need not be root). See
7732 // POSIX "capabilities".
7733 pthread_attr_t attr;
7734 pthread_attr_init( &attr );
7735 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7737 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7738 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7739 // We previously attempted to increase the audio callback priority
7740 // to SCHED_RR here via the attributes. However, while no errors
7741 // were reported in doing so, it did not work. So, now this is
7742 // done in the alsaCallbackHandler function.
7743 stream_.callbackInfo.doRealtime = true;
7744 int priority = options->priority;
7745 int min = sched_get_priority_min( SCHED_RR );
7746 int max = sched_get_priority_max( SCHED_RR );
7747 if ( priority < min ) priority = min;
7748 else if ( priority > max ) priority = max;
7749 stream_.callbackInfo.priority = priority;
7753 stream_.callbackInfo.isRunning = true;
7754 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7755 pthread_attr_destroy( &attr );
7757 stream_.callbackInfo.isRunning = false;
7758 errorText_ = "RtApiAlsa::error creating callback thread!";
// Shared error-cleanup path: release the condition variable, any open PCM
// handles, the AlsaHandle, and all allocated buffers, then mark the
// stream closed.
7767 pthread_cond_destroy( &apiInfo->runnable_cv );
7768 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7769 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7771 stream_.apiHandle = 0;
7774 if ( phandle) snd_pcm_close( phandle );
7776 for ( int i=0; i<2; i++ ) {
7777 if ( stream_.userBuffer[i] ) {
7778 free( stream_.userBuffer[i] );
7779 stream_.userBuffer[i] = 0;
7783 if ( stream_.deviceBuffer ) {
7784 free( stream_.deviceBuffer );
7785 stream_.deviceBuffer = 0;
7788 stream_.state = STREAM_CLOSED;
7792 void RtApiAlsa :: closeStream()
// Tear down an open ALSA stream: stop the callback thread, discard any
// queued PCM data, close both device handles and free all internal buffers.
7794 if ( stream_.state == STREAM_CLOSED ) {
7795 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7796 error( RtAudioError::WARNING );
// Tell the callback thread to exit its loop; if it is parked on the
// runnable condition variable (stream stopped), wake it first.
7800 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7801 stream_.callbackInfo.isRunning = false;
7802 MUTEX_LOCK( &stream_.mutex );
7803 if ( stream_.state == STREAM_STOPPED ) {
7804 apiInfo->runnable = true;
7805 pthread_cond_signal( &apiInfo->runnable_cv );
7807 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to finish before touching the PCM handles.
7808 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, drop (discard) pending samples on the open handles.
// By convention here handles[0] is playback and handles[1] is capture.
7810 if ( stream_.state == STREAM_RUNNING ) {
7811 stream_.state = STREAM_STOPPED;
7812 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7813 snd_pcm_drop( apiInfo->handles[0] );
7814 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7815 snd_pcm_drop( apiInfo->handles[1] );
// Destroy the condition variable and close whichever handles were opened.
7819 pthread_cond_destroy( &apiInfo->runnable_cv );
7820 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7821 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7823 stream_.apiHandle = 0;
// Release the user-side buffers for both directions (0 = output, 1 = input).
7826 for ( int i=0; i<2; i++ ) {
7827 if ( stream_.userBuffer[i] ) {
7828 free( stream_.userBuffer[i] );
7829 stream_.userBuffer[i] = 0;
// Release the shared device (format-conversion) buffer, if allocated.
7833 if ( stream_.deviceBuffer ) {
7834 free( stream_.deviceBuffer );
7835 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed/uninitialized state.
7838 stream_.mode = UNINITIALIZED;
7839 stream_.state = STREAM_CLOSED;
7842 void RtApiAlsa :: startStream()
// Start a stopped stream: prepare the playback/capture PCM devices if they
// are not already in the PREPARED state, mark the stream RUNNING and wake
// the callback thread waiting on runnable_cv.
7844 // This method calls snd_pcm_prepare if the device isn't already in that state.
7847 if ( stream_.state == STREAM_RUNNING ) {
7848 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7849 error( RtAudioError::WARNING );
7853 MUTEX_LOCK( &stream_.mutex );
7856 snd_pcm_state_t state;
7857 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7858 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback handle (handles[0]) unless it is already prepared.
7859 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7860 state = snd_pcm_state( handle[0] );
7861 if ( state != SND_PCM_STATE_PREPARED ) {
7862 result = snd_pcm_prepare( handle[0] );
7864 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7865 errorText_ = errorStream_.str();
// Prepare the capture handle (handles[1]); skipped when the handles are
// linked ("synchronized"), since starting one linked handle starts both.
7871 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7872 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7873 state = snd_pcm_state( handle[1] );
7874 if ( state != SND_PCM_STATE_PREPARED ) {
7875 result = snd_pcm_prepare( handle[1] );
7877 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7878 errorText_ = errorStream_.str();
7884 stream_.state = STREAM_RUNNING;
// Wake the callback thread parked in callbackEvent().
7887 apiInfo->runnable = true;
7888 pthread_cond_signal( &apiInfo->runnable_cv );
7889 MUTEX_UNLOCK( &stream_.mutex );
// Negative 'result' indicates an ALSA failure above.
7891 if ( result >= 0 ) return;
7892 error( RtAudioError::SYSTEM_ERROR );
7895 void RtApiAlsa :: stopStream()
// Stop a running stream gracefully: drain (play out) remaining output
// samples when possible, drop pending input, and park the callback thread.
7898 if ( stream_.state == STREAM_STOPPED ) {
7899 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7900 error( RtAudioError::WARNING );
7904 stream_.state = STREAM_STOPPED;
7905 MUTEX_LOCK( &stream_.mutex );
7908 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7909 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7910 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) handles are dropped rather than drained;
// draining lets buffered playback data finish on an unlinked handle.
7911 if ( apiInfo->synchronized )
7912 result = snd_pcm_drop( handle[0] );
7914 result = snd_pcm_drain( handle[0] );
7916 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7917 errorText_ = errorStream_.str();
// Capture data is simply dropped; skipped when handles are linked.
7922 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7923 result = snd_pcm_drop( handle[1] );
7925 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7926 errorText_ = errorStream_.str();
// Park the callback thread so it blocks on runnable_cv instead of spinning.
7932 apiInfo->runnable = false; // fixes high CPU usage when stopped
7933 MUTEX_UNLOCK( &stream_.mutex );
7935 if ( result >= 0 ) return;
7936 error( RtAudioError::SYSTEM_ERROR );
7939 void RtApiAlsa :: abortStream()
// Stop a running stream immediately: unlike stopStream(), playback data is
// dropped (discarded) rather than drained.
7942 if ( stream_.state == STREAM_STOPPED ) {
7943 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7944 error( RtAudioError::WARNING );
7948 stream_.state = STREAM_STOPPED;
7949 MUTEX_LOCK( &stream_.mutex );
7952 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7953 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Drop pending playback samples on handles[0].
7954 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7955 result = snd_pcm_drop( handle[0] );
7957 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7958 errorText_ = errorStream_.str();
// Drop pending capture samples on handles[1]; skipped when linked.
7963 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7964 result = snd_pcm_drop( handle[1] );
7966 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7967 errorText_ = errorStream_.str();
// Park the callback thread so it blocks on runnable_cv instead of spinning.
7973 apiInfo->runnable = false; // fixes high CPU usage when stopped
7974 MUTEX_UNLOCK( &stream_.mutex );
7976 if ( result >= 0 ) return;
7977 error( RtAudioError::SYSTEM_ERROR );
7980 void RtApiAlsa :: callbackEvent()
// One iteration of the callback thread's work: wait while the stream is
// stopped, invoke the user callback, then read capture data from and/or
// write playback data to the ALSA device(s) for one buffer of frames.
7982 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on runnable_cv until startStream()/closeStream()
// signals; bail out unless the state actually became RUNNING.
7983 if ( stream_.state == STREAM_STOPPED ) {
7984 MUTEX_LOCK( &stream_.mutex );
7985 while ( !apiInfo->runnable )
7986 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7988 if ( stream_.state != STREAM_RUNNING ) {
7989 MUTEX_UNLOCK( &stream_.mutex );
7992 MUTEX_UNLOCK( &stream_.mutex );
7995 if ( stream_.state == STREAM_CLOSED ) {
7996 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7997 error( RtAudioError::WARNING );
// Report any xrun flags (set by previous iterations) to the user callback
// via the status word, clearing them once reported.
8001 int doStopStream = 0;
8002 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8003 double streamTime = getStreamTime();
8004 RtAudioStreamStatus status = 0;
8005 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8006 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8007 apiInfo->xrun[0] = false;
8009 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8010 status |= RTAUDIO_INPUT_OVERFLOW;
8011 apiInfo->xrun[1] = false;
// User callback works on the user-format buffers (0 = output, 1 = input).
8013 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8014 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return value of 2 from the callback requests an immediate abort.
8016 if ( doStopStream == 2 ) {
8021 MUTEX_LOCK( &stream_.mutex );
8023 // The state might change while waiting on a mutex.
8024 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8030 snd_pcm_sframes_t frames;
8031 RtAudioFormat format;
8032 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side (handles[1]) ----
8034 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
// Read into the conversion buffer when a format/channel conversion is
// active, otherwise straight into the user buffer.
8036 // Setup parameters.
8037 if ( stream_.doConvertBuffer[1] ) {
8038 buffer = stream_.deviceBuffer;
8039 channels = stream_.nDeviceChannels[1];
8040 format = stream_.deviceFormat[1];
8043 buffer = stream_.userBuffer[1];
8044 channels = stream_.nUserChannels[1];
8045 format = stream_.userFormat;
8048 // Read samples from device in interleaved/non-interleaved format.
8049 if ( stream_.deviceInterleaved[1] )
8050 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the flat buffer.
8052 void *bufs[channels];
8053 size_t offset = stream_.bufferSize * formatBytes( format );
8054 for ( int i=0; i<channels; i++ )
8055 bufs[i] = (void *) (buffer + (i * offset));
8056 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// A short read signals an error or overrun; -EPIPE with state XRUN is an
// overrun: flag it and re-prepare the device so capture can continue.
8059 if ( result < (int) stream_.bufferSize ) {
8060 // Either an error or overrun occured.
8061 if ( result == -EPIPE ) {
8062 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8063 if ( state == SND_PCM_STATE_XRUN ) {
8064 apiInfo->xrun[1] = true;
8065 result = snd_pcm_prepare( handle[1] );
8067 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8068 errorText_ = errorStream_.str();
8072 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8073 errorText_ = errorStream_.str();
8077 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8078 errorText_ = errorStream_.str();
8080 error( RtAudioError::WARNING );
8084 // Do byte swapping if necessary.
8085 if ( stream_.doByteSwap[1] )
8086 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8088 // Do buffer conversion if necessary.
8089 if ( stream_.doConvertBuffer[1] )
8090 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8092 // Check stream latency
8093 result = snd_pcm_delay( handle[1], &frames );
8094 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side (handles[0]) ----
8099 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8101 // Setup parameters and do buffer conversion if necessary.
8102 if ( stream_.doConvertBuffer[0] ) {
8103 buffer = stream_.deviceBuffer;
8104 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8105 channels = stream_.nDeviceChannels[0];
8106 format = stream_.deviceFormat[0];
8109 buffer = stream_.userBuffer[0];
8110 channels = stream_.nUserChannels[0];
8111 format = stream_.userFormat;
8114 // Do byte swapping if necessary.
8115 if ( stream_.doByteSwap[0] )
8116 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8118 // Write samples to device in interleaved/non-interleaved format.
8119 if ( stream_.deviceInterleaved[0] )
8120 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, mirroring the read path.
8122 void *bufs[channels];
8123 size_t offset = stream_.bufferSize * formatBytes( format );
8124 for ( int i=0; i<channels; i++ )
8125 bufs[i] = (void *) (buffer + (i * offset));
8126 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// A short write signals an error or underrun; -EPIPE with state XRUN is an
// underrun: flag it and re-prepare the device so playback can continue.
8129 if ( result < (int) stream_.bufferSize ) {
8130 // Either an error or underrun occured.
8131 if ( result == -EPIPE ) {
8132 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8133 if ( state == SND_PCM_STATE_XRUN ) {
8134 apiInfo->xrun[0] = true;
8135 result = snd_pcm_prepare( handle[0] );
8137 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8138 errorText_ = errorStream_.str();
8141 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8144 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8145 errorText_ = errorStream_.str();
8149 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8150 errorText_ = errorStream_.str();
8152 error( RtAudioError::WARNING );
8156 // Check stream latency
8157 result = snd_pcm_delay( handle[0], &frames );
8158 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8162 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock by one buffer, then honor a stop request (1).
8164 RtApi::tickStreamTime();
8165 if ( doStopStream == 1 ) this->stopStream();
8168 static void *alsaCallbackHandler( void *ptr )
// ALSA callback thread entry point: optionally raises this thread to
// SCHED_RR realtime priority (requested via probeDeviceOpen), then services
// callbackEvent() in a loop until isRunning is cleared by closeStream().
8170 CallbackInfo *info = (CallbackInfo *) ptr;
8171 RtApiAlsa *object = (RtApiAlsa *) info->object;
8172 bool *isRunning = &info->isRunning;
8174 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8175 if ( info->doRealtime ) {
8176 pthread_t tID = pthread_self(); // ID of this thread
8177 sched_param prio = { info->priority }; // scheduling priority of thread
8178 pthread_setschedparam( tID, SCHED_RR, &prio );
// Main service loop; pthread_testcancel() provides a cancellation point.
8182 while ( *isRunning == true ) {
8183 pthread_testcancel();
8184 object->callbackEvent();
8187 pthread_exit( NULL );
8190 //******************** End of __LINUX_ALSA__ *********************//
8193 #if defined(__LINUX_PULSE__)
8195 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8196 // and Tristan Matthews.
8198 #include <pulse/error.h>
8199 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; zero-terminated list.
8202 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8203 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to its PulseAudio equivalent.
8205 struct rtaudio_pa_format_mapping_t {
8206 RtAudioFormat rtaudio_format;
8207 pa_sample_format_t pa_format;
// Table of natively supported format pairs; terminated by the
// {0, PA_SAMPLE_INVALID} sentinel entry.
8210 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8211 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8212 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8213 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8214 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the simple-API playback and
// record connections (s_play/s_rec), the callback thread, and the
// runnable flag/condition variable used to park that thread while stopped.
8216 struct PulseAudioHandle {
8220 pthread_cond_t runnable_cv;
8222 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8225 RtApiPulse::~RtApiPulse()
// Destructor: close the stream (releasing server connections and buffers)
// if it is still open.
8227 if ( stream_.state != STREAM_CLOSED )
// NOTE(review): getDeviceCount body is elided from this excerpt — the
// PulseAudio backend exposes a single virtual device (see getDeviceInfo).
8231 unsigned int RtApiPulse::getDeviceCount( void )
8236 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
// Returns a fixed description of the single virtual "PulseAudio" device:
// two channels in each direction, default for both input and output.
8238 RtAudio::DeviceInfo info;
8240 info.name = "PulseAudio";
8241 info.outputChannels = 2;
8242 info.inputChannels = 2;
8243 info.duplexChannels = 2;
8244 info.isDefaultOutput = true;
8245 info.isDefaultInput = true;
// Advertise every rate in the zero-terminated SUPPORTED_SAMPLERATES table.
8247 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8248 info.sampleRates.push_back( *sr );
8250 info.preferredSampleRate = 48000;
8251 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8256 static void *pulseaudio_callback( void * user )
// PulseAudio callback thread entry point: services callbackEvent() in a
// loop until isRunning is cleared by closeStream().
8258 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8259 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8260 volatile bool *isRunning = &cbi->isRunning;
8262 while ( *isRunning ) {
8263 pthread_testcancel();
8264 context->callbackEvent();
8267 pthread_exit( NULL );
8270 void RtApiPulse::closeStream( void )
// Tear down an open PulseAudio stream: stop the callback thread, flush and
// free the server connections, and release the user buffers.
8272 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8274 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked on runnable_cv (stream stopped)
// so it can observe isRunning == false and exit.
8276 MUTEX_LOCK( &stream_.mutex );
8277 if ( stream_.state == STREAM_STOPPED ) {
8278 pah->runnable = true;
8279 pthread_cond_signal( &pah->runnable_cv );
8281 MUTEX_UNLOCK( &stream_.mutex );
8283 pthread_join( pah->thread, 0 );
// Flush any buffered playback data, then free both simple-API connections.
8284 if ( pah->s_play ) {
8285 pa_simple_flush( pah->s_play, NULL );
8286 pa_simple_free( pah->s_play );
8289 pa_simple_free( pah->s_rec );
8291 pthread_cond_destroy( &pah->runnable_cv );
8293 stream_.apiHandle = 0;
// Release the user-side buffers (0 = output, 1 = input).
8296 if ( stream_.userBuffer[0] ) {
8297 free( stream_.userBuffer[0] );
8298 stream_.userBuffer[0] = 0;
8300 if ( stream_.userBuffer[1] ) {
8301 free( stream_.userBuffer[1] );
8302 stream_.userBuffer[1] = 0;
8305 stream_.state = STREAM_CLOSED;
8306 stream_.mode = UNINITIALIZED;
8309 void RtApiPulse::callbackEvent( void )
// One iteration of the PulseAudio callback thread's work: wait while the
// stream is stopped, invoke the user callback, then push playback data with
// pa_simple_write() and/or pull capture data with pa_simple_read().
8311 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on runnable_cv until startStream()/closeStream()
// signals; bail out unless the state actually became RUNNING.
8313 if ( stream_.state == STREAM_STOPPED ) {
8314 MUTEX_LOCK( &stream_.mutex );
8315 while ( !pah->runnable )
8316 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8318 if ( stream_.state != STREAM_RUNNING ) {
8319 MUTEX_UNLOCK( &stream_.mutex );
8322 MUTEX_UNLOCK( &stream_.mutex );
8325 if ( stream_.state == STREAM_CLOSED ) {
8326 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8327 "this shouldn't happen!";
8328 error( RtAudioError::WARNING );
// Invoke the user callback with the user-format buffers.
8332 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8333 double streamTime = getStreamTime();
8334 RtAudioStreamStatus status = 0;
8335 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8336 stream_.bufferSize, streamTime, status,
8337 stream_.callbackInfo.userData );
// A return value of 2 from the callback requests an immediate abort.
8339 if ( doStopStream == 2 ) {
8344 MUTEX_LOCK( &stream_.mutex );
// Server-side buffers: use the conversion buffer when a format/channel
// conversion is active, else the user buffers directly.
8345 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8346 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8348 if ( stream_.state != STREAM_RUNNING )
// Playback: convert if needed, compute the byte count, then write.
8353 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8354 if ( stream_.doConvertBuffer[OUTPUT] ) {
8355 convertBuffer( stream_.deviceBuffer,
8356 stream_.userBuffer[OUTPUT],
8357 stream_.convertInfo[OUTPUT] );
8358 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8359 formatBytes( stream_.deviceFormat[OUTPUT] );
8361 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8362 formatBytes( stream_.userFormat );
8364 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8365 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8366 pa_strerror( pa_error ) << ".";
8367 errorText_ = errorStream_.str();
8368 error( RtAudioError::WARNING );
// Capture: read the raw bytes, then convert into the user buffer if needed.
8372 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8373 if ( stream_.doConvertBuffer[INPUT] )
8374 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8375 formatBytes( stream_.deviceFormat[INPUT] );
8377 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8378 formatBytes( stream_.userFormat );
8380 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8381 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8382 pa_strerror( pa_error ) << ".";
8383 errorText_ = errorStream_.str();
8384 error( RtAudioError::WARNING );
8386 if ( stream_.doConvertBuffer[INPUT] ) {
8387 convertBuffer( stream_.userBuffer[INPUT],
8388 stream_.deviceBuffer,
8389 stream_.convertInfo[INPUT] );
8394 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock by one buffer, then honor a stop request (1).
8395 RtApi::tickStreamTime();
8397 if ( doStopStream == 1 )
8401 void RtApiPulse::startStream( void )
// Start a stopped stream: mark it RUNNING and wake the callback thread
// waiting on runnable_cv. No server calls are needed here; the simple API
// connections were already established in probeDeviceOpen().
8403 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8405 if ( stream_.state == STREAM_CLOSED ) {
8406 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8407 error( RtAudioError::INVALID_USE );
8410 if ( stream_.state == STREAM_RUNNING ) {
8411 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8412 error( RtAudioError::WARNING );
8416 MUTEX_LOCK( &stream_.mutex );
8418 stream_.state = STREAM_RUNNING;
8420 pah->runnable = true;
8421 pthread_cond_signal( &pah->runnable_cv );
8422 MUTEX_UNLOCK( &stream_.mutex );
8425 void RtApiPulse::stopStream( void )
// Stop a running stream gracefully: drain buffered playback data on the
// server (pa_simple_drain) before marking the stream STOPPED.
8427 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8429 if ( stream_.state == STREAM_CLOSED ) {
8430 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8431 error( RtAudioError::INVALID_USE );
8434 if ( stream_.state == STREAM_STOPPED ) {
8435 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8436 error( RtAudioError::WARNING );
8440 stream_.state = STREAM_STOPPED;
8441 MUTEX_LOCK( &stream_.mutex );
// Drain lets any already-written playback samples finish playing.
8443 if ( pah && pah->s_play ) {
8445 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8446 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8447 pa_strerror( pa_error ) << ".";
8448 errorText_ = errorStream_.str();
8449 MUTEX_UNLOCK( &stream_.mutex );
8450 error( RtAudioError::SYSTEM_ERROR );
8455 stream_.state = STREAM_STOPPED;
8456 MUTEX_UNLOCK( &stream_.mutex );
8459 void RtApiPulse::abortStream( void )
// Stop a running stream immediately: unlike stopStream(), buffered playback
// data is flushed (discarded) via pa_simple_flush rather than drained.
8461 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8463 if ( stream_.state == STREAM_CLOSED ) {
8464 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8465 error( RtAudioError::INVALID_USE );
8468 if ( stream_.state == STREAM_STOPPED ) {
8469 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8470 error( RtAudioError::WARNING );
8474 stream_.state = STREAM_STOPPED;
8475 MUTEX_LOCK( &stream_.mutex );
8477 if ( pah && pah->s_play ) {
8479 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8480 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8481 pa_strerror( pa_error ) << ".";
8482 errorText_ = errorStream_.str();
8483 MUTEX_UNLOCK( &stream_.mutex );
8484 error( RtAudioError::SYSTEM_ERROR );
8489 stream_.state = STREAM_STOPPED;
8490 MUTEX_UNLOCK( &stream_.mutex );
8493 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8494 unsigned int channels, unsigned int firstChannel,
8495 unsigned int sampleRate, RtAudioFormat format,
8496 unsigned int *bufferSize, RtAudio::StreamOptions *options )
// Open one direction (INPUT or OUTPUT) of the single PulseAudio device:
// validate the request, set up stream bookkeeping and conversion buffers,
// connect to the server via the simple API, and (on first open) spawn the
// callback thread. Returns true on success; the error path (FAILURE) frees
// everything allocated here.
8498 PulseAudioHandle *pah = 0;
8499 unsigned long bufferBytes = 0;
// Only device 0 exists; only mono/stereo and channel offset 0 are supported.
8502 if ( device != 0 ) return false;
8503 if ( mode != INPUT && mode != OUTPUT ) return false;
8504 if ( channels != 1 && channels != 2 ) {
8505 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8508 ss.channels = channels;
8510 if ( firstChannel != 0 ) return false;
// The requested rate must appear in the SUPPORTED_SAMPLERATES table.
8512 bool sr_found = false;
8513 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8514 if ( sampleRate == *sr ) {
8516 stream_.sampleRate = sampleRate;
8517 ss.rate = sampleRate;
8522 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Find a native PulseAudio format for the requested RtAudio format;
// otherwise fall back to FLOAT32 on the server and convert internally.
8527 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8528 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8529 if ( format == sf->rtaudio_format ) {
8531 stream_.userFormat = sf->rtaudio_format;
8532 stream_.deviceFormat[mode] = stream_.userFormat;
8533 ss.format = sf->pa_format;
8537 if ( !sf_found ) { // Use internal data format conversion.
8538 stream_.userFormat = format;
8539 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8540 ss.format = PA_SAMPLE_FLOAT32LE;
8543 // Set other stream parameters.
8544 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8545 else stream_.userInterleaved = true;
8546 stream_.deviceInterleaved[mode] = true;
8547 stream_.nBuffers = 1;
8548 stream_.doByteSwap[mode] = false;
8549 stream_.nUserChannels[mode] = channels;
8550 stream_.nDeviceChannels[mode] = channels + firstChannel;
8551 stream_.channelOffset[mode] = 0;
8552 std::string streamName = "RtAudio";
8554 // Set flags for buffer conversion.
8555 stream_.doConvertBuffer[mode] = false;
8556 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8557 stream_.doConvertBuffer[mode] = true;
8558 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8559 stream_.doConvertBuffer[mode] = true;
8561 // Allocate necessary internal buffers.
8562 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8563 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8564 if ( stream_.userBuffer[mode] == NULL ) {
8565 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8568 stream_.bufferSize = *bufferSize;
// Allocate (or reuse) the shared device-format conversion buffer; an
// existing buffer from a prior OUTPUT open is reused if large enough.
8570 if ( stream_.doConvertBuffer[mode] ) {
8572 bool makeBuffer = true;
8573 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8574 if ( mode == INPUT ) {
8575 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8576 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8577 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8582 bufferBytes *= *bufferSize;
8583 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8584 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8585 if ( stream_.deviceBuffer == NULL ) {
8586 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8592 stream_.device[mode] = device;
8594 // Setup the buffer conversion information structure.
8595 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the per-stream handle (with its condition variable) on first open;
// on a second open (other direction) reuse the existing one.
8597 if ( !stream_.apiHandle ) {
8598 PulseAudioHandle *pah = new PulseAudioHandle;
8600 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8604 stream_.apiHandle = pah;
8605 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8606 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8610 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8613 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Connect to the PulseAudio server for the requested direction.
8616 pa_buffer_attr buffer_attr;
8617 buffer_attr.fragsize = bufferBytes;
8618 buffer_attr.maxlength = -1;
8620 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8621 if ( !pah->s_rec ) {
8622 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8627 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8628 if ( !pah->s_play ) {
8629 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track the combined mode: a second open of the other direction => DUPLEX.
8637 if ( stream_.mode == UNINITIALIZED )
8638 stream_.mode = mode;
8639 else if ( stream_.mode == mode )
8642 stream_.mode = DUPLEX;
// Spawn the callback thread on the first successful open only.
8644 if ( !stream_.callbackInfo.isRunning ) {
8645 stream_.callbackInfo.object = this;
8646 stream_.callbackInfo.isRunning = true;
8647 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8648 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8653 stream_.state = STREAM_STOPPED;
// Error path: undo everything allocated above before returning FAILURE.
8657 if ( pah && stream_.callbackInfo.isRunning ) {
8658 pthread_cond_destroy( &pah->runnable_cv );
8660 stream_.apiHandle = 0;
8663 for ( int i=0; i<2; i++ ) {
8664 if ( stream_.userBuffer[i] ) {
8665 free( stream_.userBuffer[i] );
8666 stream_.userBuffer[i] = 0;
8670 if ( stream_.deviceBuffer ) {
8671 free( stream_.deviceBuffer );
8672 stream_.deviceBuffer = 0;
8678 //******************** End of __LINUX_PULSE__ *********************//
8681 #if defined(__LINUX_OSS__)
8684 #include <sys/ioctl.h>
8687 #include <sys/soundcard.h>
8691 static void *ossCallbackHandler(void * ptr);
8693 // A structure to hold various information related to the OSS API
// implementation. NOTE(review): the struct declaration line and some
// members are elided from this excerpt; visible fields are the two device
// file descriptors, the runnable condition variable, and a constructor
// that zeroes the ids and xrun flags.
8696 int id[2]; // device ids
8699 pthread_cond_t runnable;
8702 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
8705 RtApiOss :: RtApiOss()
// Default constructor: no backend-specific initialization is required.
8707 // Nothing to do here.
8710 RtApiOss :: ~RtApiOss()
// Destructor: close the stream if it is still open.
8712 if ( stream_.state != STREAM_CLOSED ) closeStream();
8715 unsigned int RtApiOss :: getDeviceCount( void )
// Query the number of OSS audio devices via the mixer's SNDCTL_SYSINFO
// ioctl (requires OSS version >= 4.0). Emits a WARNING on failure.
8717 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8718 if ( mixerfd == -1 ) {
8719 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8720 error( RtAudioError::WARNING );
8724 oss_sysinfo sysinfo;
8725 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8727 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8728 error( RtAudioError::WARNING );
8733 return sysinfo.numaudios;
8736 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
// Probe one OSS device via the mixer ioctls: channel capabilities, native
// data formats and supported sample rates. info.probed stays false on any
// failure; each failure path emits a WARNING or INVALID_USE error.
8738 RtAudio::DeviceInfo info;
8739 info.probed = false;
8741 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8742 if ( mixerfd == -1 ) {
8743 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8744 error( RtAudioError::WARNING );
8748 oss_sysinfo sysinfo;
8749 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8750 if ( result == -1 ) {
8752 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8753 error( RtAudioError::WARNING );
// Validate the requested device index against the system device count.
8757 unsigned nDevices = sysinfo.numaudios;
8758 if ( nDevices == 0 ) {
8760 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8761 error( RtAudioError::INVALID_USE );
8765 if ( device >= nDevices ) {
8767 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8768 error( RtAudioError::INVALID_USE );
// Fetch the per-device capability record.
8772 oss_audioinfo ainfo;
8774 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8776 if ( result == -1 ) {
8777 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8778 errorText_ = errorStream_.str();
8779 error( RtAudioError::WARNING );
// Channel counts come from the capability bits; duplex channel count is
// the smaller of the two directions.
8784 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8785 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8786 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8787 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8788 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8791 // Probe data formats ... do for input
8792 unsigned long mask = ainfo.iformats;
8793 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8794 info.nativeFormats |= RTAUDIO_SINT16;
8795 if ( mask & AFMT_S8 )
8796 info.nativeFormats |= RTAUDIO_SINT8;
8797 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8798 info.nativeFormats |= RTAUDIO_SINT32;
8800 if ( mask & AFMT_FLOAT )
8801 info.nativeFormats |= RTAUDIO_FLOAT32;
8803 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8804 info.nativeFormats |= RTAUDIO_SINT24;
8806 // Check that we have at least one supported format
8807 if ( info.nativeFormats == 0 ) {
8808 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8809 errorText_ = errorStream_.str();
8810 error( RtAudioError::WARNING );
8814 // Probe the supported sample rates.
8815 info.sampleRates.clear();
// If the device reports an explicit rate list, intersect it with RtAudio's
// SAMPLE_RATES table; prefer the highest supported rate <= 48 kHz.
8816 if ( ainfo.nrates ) {
8817 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8818 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8819 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8820 info.sampleRates.push_back( SAMPLE_RATES[k] );
8822 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8823 info.preferredSampleRate = SAMPLE_RATES[k];
// Otherwise accept every table rate inside the device's [min, max] range.
8831 // Check min and max rate values;
8832 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8833 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8834 info.sampleRates.push_back( SAMPLE_RATES[k] );
8836 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8837 info.preferredSampleRate = SAMPLE_RATES[k];
8842 if ( info.sampleRates.size() == 0 ) {
8843 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8844 errorText_ = errorStream_.str();
8845 error( RtAudioError::WARNING );
8849 info.name = ainfo.name;
8856 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8857 unsigned int firstChannel, unsigned int sampleRate,
8858 RtAudioFormat format, unsigned int *bufferSize,
8859 RtAudio::StreamOptions *options )
8861 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8862 if ( mixerfd == -1 ) {
8863 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8867 oss_sysinfo sysinfo;
8868 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8869 if ( result == -1 ) {
8871 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8875 unsigned nDevices = sysinfo.numaudios;
8876 if ( nDevices == 0 ) {
8877 // This should not happen because a check is made before this function is called.
8879 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8883 if ( device >= nDevices ) {
8884 // This should not happen because a check is made before this function is called.
8886 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8890 oss_audioinfo ainfo;
8892 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8894 if ( result == -1 ) {
8895 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8896 errorText_ = errorStream_.str();
8900 // Check if device supports input or output
8901 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8902 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8903 if ( mode == OUTPUT )
8904 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8906 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8907 errorText_ = errorStream_.str();
8912 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8913 if ( mode == OUTPUT )
8915 else { // mode == INPUT
8916 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8917 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8918 close( handle->id[0] );
8920 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8921 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8922 errorText_ = errorStream_.str();
8925 // Check that the number previously set channels is the same.
8926 if ( stream_.nUserChannels[0] != channels ) {
8927 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8928 errorText_ = errorStream_.str();
8937 // Set exclusive access if specified.
8938 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8940 // Try to open the device.
8942 fd = open( ainfo.devnode, flags, 0 );
8944 if ( errno == EBUSY )
8945 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8947 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8948 errorText_ = errorStream_.str();
8952 // For duplex operation, specifically set this mode (this doesn't seem to work).
8954 if ( flags | O_RDWR ) {
8955 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8956 if ( result == -1) {
8957 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8958 errorText_ = errorStream_.str();
8964 // Check the device channel support.
8965 stream_.nUserChannels[mode] = channels;
8966 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8968 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8969 errorText_ = errorStream_.str();
8973 // Set the number of channels.
8974 int deviceChannels = channels + firstChannel;
8975 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8976 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8978 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8979 errorText_ = errorStream_.str();
8982 stream_.nDeviceChannels[mode] = deviceChannels;
8984 // Get the data format mask
8986 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8987 if ( result == -1 ) {
8989 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8990 errorText_ = errorStream_.str();
8994 // Determine how to set the device format.
8995 stream_.userFormat = format;
8996 int deviceFormat = -1;
8997 stream_.doByteSwap[mode] = false;
8998 if ( format == RTAUDIO_SINT8 ) {
8999 if ( mask & AFMT_S8 ) {
9000 deviceFormat = AFMT_S8;
9001 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9004 else if ( format == RTAUDIO_SINT16 ) {
9005 if ( mask & AFMT_S16_NE ) {
9006 deviceFormat = AFMT_S16_NE;
9007 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9009 else if ( mask & AFMT_S16_OE ) {
9010 deviceFormat = AFMT_S16_OE;
9011 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9012 stream_.doByteSwap[mode] = true;
9015 else if ( format == RTAUDIO_SINT24 ) {
9016 if ( mask & AFMT_S24_NE ) {
9017 deviceFormat = AFMT_S24_NE;
9018 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9020 else if ( mask & AFMT_S24_OE ) {
9021 deviceFormat = AFMT_S24_OE;
9022 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9023 stream_.doByteSwap[mode] = true;
9026 else if ( format == RTAUDIO_SINT32 ) {
9027 if ( mask & AFMT_S32_NE ) {
9028 deviceFormat = AFMT_S32_NE;
9029 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9031 else if ( mask & AFMT_S32_OE ) {
9032 deviceFormat = AFMT_S32_OE;
9033 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9034 stream_.doByteSwap[mode] = true;
9038 if ( deviceFormat == -1 ) {
9039 // The user requested format is not natively supported by the device.
9040 if ( mask & AFMT_S16_NE ) {
9041 deviceFormat = AFMT_S16_NE;
9042 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9044 else if ( mask & AFMT_S32_NE ) {
9045 deviceFormat = AFMT_S32_NE;
9046 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9048 else if ( mask & AFMT_S24_NE ) {
9049 deviceFormat = AFMT_S24_NE;
9050 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9052 else if ( mask & AFMT_S16_OE ) {
9053 deviceFormat = AFMT_S16_OE;
9054 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9055 stream_.doByteSwap[mode] = true;
9057 else if ( mask & AFMT_S32_OE ) {
9058 deviceFormat = AFMT_S32_OE;
9059 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9060 stream_.doByteSwap[mode] = true;
9062 else if ( mask & AFMT_S24_OE ) {
9063 deviceFormat = AFMT_S24_OE;
9064 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9065 stream_.doByteSwap[mode] = true;
9067 else if ( mask & AFMT_S8) {
9068 deviceFormat = AFMT_S8;
9069 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9073 if ( stream_.deviceFormat[mode] == 0 ) {
9074 // This really shouldn't happen ...
9076 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9077 errorText_ = errorStream_.str();
9081 // Set the data format.
9082 int temp = deviceFormat;
9083 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9084 if ( result == -1 || deviceFormat != temp ) {
9086 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9087 errorText_ = errorStream_.str();
9091 // Attempt to set the buffer size. According to OSS, the minimum
9092 // number of buffers is two. The supposed minimum buffer size is 16
9093 // bytes, so that will be our lower bound. The argument to this
9094 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9095 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9096 // We'll check the actual value used near the end of the setup
9098 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9099 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9101 if ( options ) buffers = options->numberOfBuffers;
9102 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9103 if ( buffers < 2 ) buffers = 3;
9104 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9105 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9106 if ( result == -1 ) {
9108 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9109 errorText_ = errorStream_.str();
9112 stream_.nBuffers = buffers;
9114 // Save buffer size (in sample frames).
9115 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9116 stream_.bufferSize = *bufferSize;
9118 // Set the sample rate.
9119 int srate = sampleRate;
9120 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9121 if ( result == -1 ) {
9123 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9124 errorText_ = errorStream_.str();
9128 // Verify the sample rate setup worked.
9129 if ( abs( srate - (int)sampleRate ) > 100 ) {
9131 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9132 errorText_ = errorStream_.str();
9135 stream_.sampleRate = sampleRate;
9137 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9138 // We're doing duplex setup here.
9139 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9140 stream_.nDeviceChannels[0] = deviceChannels;
9143 // Set interleaving parameters.
9144 stream_.userInterleaved = true;
9145 stream_.deviceInterleaved[mode] = true;
9146 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9147 stream_.userInterleaved = false;
9149 // Set flags for buffer conversion
9150 stream_.doConvertBuffer[mode] = false;
9151 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9152 stream_.doConvertBuffer[mode] = true;
9153 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9154 stream_.doConvertBuffer[mode] = true;
9155 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9156 stream_.nUserChannels[mode] > 1 )
9157 stream_.doConvertBuffer[mode] = true;
9159 // Allocate the stream handles if necessary and then save.
9160 if ( stream_.apiHandle == 0 ) {
9162 handle = new OssHandle;
9164 catch ( std::bad_alloc& ) {
9165 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9169 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9170 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9174 stream_.apiHandle = (void *) handle;
9177 handle = (OssHandle *) stream_.apiHandle;
9179 handle->id[mode] = fd;
9181 // Allocate necessary internal buffers.
9182 unsigned long bufferBytes;
9183 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9184 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9185 if ( stream_.userBuffer[mode] == NULL ) {
9186 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9190 if ( stream_.doConvertBuffer[mode] ) {
9192 bool makeBuffer = true;
9193 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9194 if ( mode == INPUT ) {
9195 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9196 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9197 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9202 bufferBytes *= *bufferSize;
9203 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9204 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9205 if ( stream_.deviceBuffer == NULL ) {
9206 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9212 stream_.device[mode] = device;
9213 stream_.state = STREAM_STOPPED;
9215 // Setup the buffer conversion information structure.
9216 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9218 // Setup thread if necessary.
9219 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9220 // We had already set up an output stream.
9221 stream_.mode = DUPLEX;
9222 if ( stream_.device[0] == device ) handle->id[0] = fd;
9225 stream_.mode = mode;
9227 // Setup callback thread.
9228 stream_.callbackInfo.object = (void *) this;
9230 // Set the thread attributes for joinable and realtime scheduling
9231 // priority. The higher priority will only take affect if the
9232 // program is run as root or suid.
9233 pthread_attr_t attr;
9234 pthread_attr_init( &attr );
9235 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9236 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9237 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9238 struct sched_param param;
9239 int priority = options->priority;
9240 int min = sched_get_priority_min( SCHED_RR );
9241 int max = sched_get_priority_max( SCHED_RR );
9242 if ( priority < min ) priority = min;
9243 else if ( priority > max ) priority = max;
9244 param.sched_priority = priority;
9245 pthread_attr_setschedparam( &attr, ¶m );
9246 pthread_attr_setschedpolicy( &attr, SCHED_RR );
9249 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9251 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9254 stream_.callbackInfo.isRunning = true;
9255 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9256 pthread_attr_destroy( &attr );
9258 stream_.callbackInfo.isRunning = false;
9259 errorText_ = "RtApiOss::error creating callback thread!";
9268 pthread_cond_destroy( &handle->runnable );
9269 if ( handle->id[0] ) close( handle->id[0] );
9270 if ( handle->id[1] ) close( handle->id[1] );
9272 stream_.apiHandle = 0;
9275 for ( int i=0; i<2; i++ ) {
9276 if ( stream_.userBuffer[i] ) {
9277 free( stream_.userBuffer[i] );
9278 stream_.userBuffer[i] = 0;
9282 if ( stream_.deviceBuffer ) {
9283 free( stream_.deviceBuffer );
9284 stream_.deviceBuffer = 0;
// Close the open stream: stop the callback thread, halt any running I/O,
// release the OSS file descriptors and condition variable, and free all
// internal buffers. Warns (non-fatal) if no stream is open.
9290 void RtApiOss :: closeStream()
9292 if ( stream_.state == STREAM_CLOSED ) {
9293 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9294 error( RtAudioError::WARNING );
9298 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Tell the callback thread to exit; if it is blocked waiting on the
// condition variable (stream stopped), wake it so the join can complete.
9299 stream_.callbackInfo.isRunning = false;
9300 MUTEX_LOCK( &stream_.mutex );
9301 if ( stream_.state == STREAM_STOPPED )
9302 pthread_cond_signal( &handle->runnable );
9303 MUTEX_UNLOCK( &stream_.mutex );
9304 pthread_join( stream_.callbackInfo.thread, NULL );
// Halt any in-flight playback/capture before closing descriptors.
9306 if ( stream_.state == STREAM_RUNNING ) {
9307 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9308 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9310 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9311 stream_.state = STREAM_STOPPED;
9315 pthread_cond_destroy( &handle->runnable );
9316 if ( handle->id[0] ) close( handle->id[0] );
9317 if ( handle->id[1] ) close( handle->id[1] );
9319 stream_.apiHandle = 0;
9322 for ( int i=0; i<2; i++ ) {
9323 if ( stream_.userBuffer[i] ) {
9324 free( stream_.userBuffer[i] );
9325 stream_.userBuffer[i] = 0;
9329 if ( stream_.deviceBuffer ) {
9330 free( stream_.deviceBuffer );
9331 stream_.deviceBuffer = 0;
9334 stream_.mode = UNINITIALIZED;
9335 stream_.state = STREAM_CLOSED;
// Start the stream: mark it RUNNING and wake the callback thread, which
// is parked on the `runnable` condition variable while stopped. OSS needs
// no explicit device trigger here -- it starts when samples are written.
9338 void RtApiOss :: startStream()
9341 if ( stream_.state == STREAM_RUNNING ) {
9342 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9343 error( RtAudioError::WARNING );
9347 MUTEX_LOCK( &stream_.mutex );
9349 stream_.state = STREAM_RUNNING;
9351 // No need to do anything else here ... OSS automatically starts
9352 // when fed samples.
9354 MUTEX_UNLOCK( &stream_.mutex );
// Wake callbackEvent(), which waits on this condvar while stopped.
9356 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9357 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: drain the output by writing nBuffers+1
// buffers of silence (so queued audio plays out without a click), then
// halt the device(s) and mark the stream STOPPED. Raises a SYSTEM_ERROR
// if any halt ioctl failed.
9360 void RtApiOss :: stopStream()
9363 if ( stream_.state == STREAM_STOPPED ) {
9364 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9365 error( RtAudioError::WARNING );
9369 MUTEX_LOCK( &stream_.mutex );
9371 // The state might change while waiting on a mutex.
9372 if ( stream_.state == STREAM_STOPPED ) {
9373 MUTEX_UNLOCK( &stream_.mutex );
9378 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9379 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9381 // Flush the output with zeros a few times.
// Pick whichever buffer is actually fed to the device (converted or user).
9384 RtAudioFormat format;
9386 if ( stream_.doConvertBuffer[0] ) {
9387 buffer = stream_.deviceBuffer;
9388 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9389 format = stream_.deviceFormat[0];
9392 buffer = stream_.userBuffer[0];
9393 samples = stream_.bufferSize * stream_.nUserChannels[0];
9394 format = stream_.userFormat;
9397 memset( buffer, 0, samples * formatBytes(format) );
9398 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9399 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9400 if ( result == -1 ) {
9401 errorText_ = "RtApiOss::stopStream: audio write error.";
9402 error( RtAudioError::WARNING );
9406 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9407 if ( result == -1 ) {
9408 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9409 errorText_ = errorStream_.str();
// Reset the duplex trigger so a restart re-arms input+output together.
9412 handle->triggered = false;
// Halt the input side only when it uses its own descriptor.
9415 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9416 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9417 if ( result == -1 ) {
9418 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9419 errorText_ = errorStream_.str();
9425 stream_.state = STREAM_STOPPED;
9426 MUTEX_UNLOCK( &stream_.mutex );
9428 if ( result != -1 ) return;
9429 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately: halt the device(s) without draining any
// queued output (unlike stopStream, no silence is written first). Raises
// a SYSTEM_ERROR if a halt ioctl failed.
9432 void RtApiOss :: abortStream()
9435 if ( stream_.state == STREAM_STOPPED ) {
9436 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9437 error( RtAudioError::WARNING );
9441 MUTEX_LOCK( &stream_.mutex );
9443 // The state might change while waiting on a mutex.
9444 if ( stream_.state == STREAM_STOPPED ) {
9445 MUTEX_UNLOCK( &stream_.mutex );
9450 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9452 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9453 if ( result == -1 ) {
9454 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9455 errorText_ = errorStream_.str();
// Reset the duplex trigger so a restart re-arms input+output together.
9458 handle->triggered = false;
// Halt the input side only when it uses its own descriptor.
9461 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9462 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9463 if ( result == -1 ) {
9464 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9465 errorText_ = errorStream_.str();
9471 stream_.state = STREAM_STOPPED;
9472 MUTEX_UNLOCK( &stream_.mutex );
9474 if ( result != -1 ) return;
9475 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback loop (invoked repeatedly by
// ossCallbackHandler): wait while stopped, invoke the user callback for
// fresh data, then write output to / read input from the OSS device,
// performing format conversion and byte swapping as configured.
9478 void RtApiOss :: callbackEvent()
9480 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9481 if ( stream_.state == STREAM_STOPPED ) {
9482 MUTEX_LOCK( &stream_.mutex );
// NOTE(review): single pthread_cond_wait, not a while-loop; a spurious
// wakeup is caught by the state re-check below, which bails out early.
9483 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9484 if ( stream_.state != STREAM_RUNNING ) {
9485 MUTEX_UNLOCK( &stream_.mutex );
9488 MUTEX_UNLOCK( &stream_.mutex );
9491 if ( stream_.state == STREAM_CLOSED ) {
9492 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9493 error( RtAudioError::WARNING );
9497 // Invoke user callback to get fresh output data.
9498 int doStopStream = 0;
9499 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9500 double streamTime = getStreamTime();
// Report (and clear) any xrun flags latched by the read/write paths below.
9501 RtAudioStreamStatus status = 0;
9502 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9503 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9504 handle->xrun[0] = false;
9506 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9507 status |= RTAUDIO_INPUT_OVERFLOW;
9508 handle->xrun[1] = false;
9510 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9511 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 requests an immediate abort (no drain).
9512 if ( doStopStream == 2 ) {
9513 this->abortStream();
9517 MUTEX_LOCK( &stream_.mutex );
9519 // The state might change while waiting on a mutex.
9520 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9525 RtAudioFormat format;
9527 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9529 // Setup parameters and do buffer conversion if necessary.
9530 if ( stream_.doConvertBuffer[0] ) {
9531 buffer = stream_.deviceBuffer;
9532 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9533 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9534 format = stream_.deviceFormat[0];
9537 buffer = stream_.userBuffer[0];
9538 samples = stream_.bufferSize * stream_.nUserChannels[0];
9539 format = stream_.userFormat;
9542 // Do byte swapping if necessary.
9543 if ( stream_.doByteSwap[0] )
9544 byteSwapBuffer( buffer, samples, format );
// First duplex buffer: prime output while triggers are disabled, then
// enable input+output triggers together so both directions start in sync.
9546 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9548 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9549 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9550 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9551 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9552 handle->triggered = true;
9555 // Write samples to device.
9556 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9558 if ( result == -1 ) {
9559 // We'll assume this is an underrun, though there isn't a
9560 // specific means for determining that.
9561 handle->xrun[0] = true;
9562 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9563 error( RtAudioError::WARNING );
9564 // Continue on to input section.
9568 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9570 // Setup parameters.
9571 if ( stream_.doConvertBuffer[1] ) {
9572 buffer = stream_.deviceBuffer;
9573 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9574 format = stream_.deviceFormat[1];
9577 buffer = stream_.userBuffer[1];
9578 samples = stream_.bufferSize * stream_.nUserChannels[1];
9579 format = stream_.userFormat;
9582 // Read samples from device.
9583 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9585 if ( result == -1 ) {
9586 // We'll assume this is an overrun, though there isn't a
9587 // specific means for determining that.
9588 handle->xrun[1] = true;
9589 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9590 error( RtAudioError::WARNING );
9594 // Do byte swapping if necessary.
9595 if ( stream_.doByteSwap[1] )
9596 byteSwapBuffer( buffer, samples, format );
9598 // Do buffer conversion if necessary.
9599 if ( stream_.doConvertBuffer[1] )
9600 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9604 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; callback return of 1 requests a drained stop.
9606 RtApi::tickStreamTime();
9607 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the OSS callback thread (created in
// probeDeviceOpen). Loops calling callbackEvent() until
// CallbackInfo::isRunning is cleared (by closeStream or error handling).
9610 static void *ossCallbackHandler( void *ptr )
9612 CallbackInfo *info = (CallbackInfo *) ptr;
9613 RtApiOss *object = (RtApiOss *) info->object;
9614 bool *isRunning = &info->isRunning;
9616 while ( *isRunning == true ) {
// Explicit cancellation point, since callbackEvent may block in I/O.
9617 pthread_testcancel();
9618 object->callbackEvent();
9621 pthread_exit( NULL );
9624 //******************** End of __LINUX_OSS__ *********************//
9628 // *************************************************** //
9630 // Protected common (OS-independent) RtAudio methods.
9632 // *************************************************** //
9634 // This method can be modified to control the behavior of error
9635 // message printing.
// Central error dispatch: routes errorText_ either to the user-installed
// error callback (keeping only the first message while nested errors from
// cleanup are suppressed via firstErrorOccurred_), or to stderr for
// warnings / a thrown RtAudioError for everything else.
9636 void RtApi :: error( RtAudioError::Type type )
9638 errorStream_.str(""); // clear the ostringstream
9640 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9641 if ( errorCallback ) {
9642 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9644 if ( firstErrorOccurred_ )
9647 firstErrorOccurred_ = true;
// Copy the message: errorText_ may be overwritten by nested error() calls.
9648 const std::string errorMessage = errorText_;
9650 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9651 stream_.callbackInfo.isRunning = false; // exit from the thread
9655 errorCallback( type, errorMessage );
9656 firstErrorOccurred_ = false;
9660 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9661 std::cerr << '\n' << errorText_ << "\n\n";
9662 else if ( type != RtAudioError::WARNING )
9663 throw( RtAudioError( errorText_, type ) );
// Guard used by public stream methods: raises INVALID_USE (which throws
// via error()) when no stream is currently open.
9666 void RtApi :: verifyStream()
9668 if ( stream_.state == STREAM_CLOSED ) {
9669 errorText_ = "RtApi:: a stream is not open!";
9670 error( RtAudioError::INVALID_USE );
// Reset every field of the stream_ structure to its closed/default state.
// Called to (re)initialize stream bookkeeping; does not free resources --
// callers are expected to have released buffers/handles already.
9674 void RtApi :: clearStreamInfo()
9676 stream_.mode = UNINITIALIZED;
9677 stream_.state = STREAM_CLOSED;
9678 stream_.sampleRate = 0;
9679 stream_.bufferSize = 0;
9680 stream_.nBuffers = 0;
9681 stream_.userFormat = 0;
9682 stream_.userInterleaved = true;
9683 stream_.streamTime = 0.0;
9684 stream_.apiHandle = 0;
9685 stream_.deviceBuffer = 0;
9686 stream_.callbackInfo.callback = 0;
9687 stream_.callbackInfo.userData = 0;
9688 stream_.callbackInfo.isRunning = false;
9689 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = output, 1 = input.
9690 for ( int i=0; i<2; i++ ) {
// 11111 is a sentinel meaning "no device selected".
9691 stream_.device[i] = 11111;
9692 stream_.doConvertBuffer[i] = false;
9693 stream_.deviceInterleaved[i] = true;
9694 stream_.doByteSwap[i] = false;
9695 stream_.nUserChannels[i] = 0;
9696 stream_.nDeviceChannels[i] = 0;
9697 stream_.channelOffset[i] = 0;
9698 stream_.deviceFormat[i] = 0;
9699 stream_.latency[i] = 0;
9700 stream_.userBuffer[i] = 0;
9701 stream_.convertInfo[i].channels = 0;
9702 stream_.convertInfo[i].inJump = 0;
9703 stream_.convertInfo[i].outJump = 0;
9704 stream_.convertInfo[i].inFormat = 0;
9705 stream_.convertInfo[i].outFormat = 0;
9706 stream_.convertInfo[i].inOffset.clear();
9707 stream_.convertInfo[i].outOffset.clear();
// Map an RtAudioFormat to its size in bytes per sample (the return
// statements for each branch are elided in this view). An unrecognized
// format falls through to the warning below.
9711 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9713 if ( format == RTAUDIO_SINT16 )
9715 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9717 else if ( format == RTAUDIO_FLOAT64 )
9719 else if ( format == RTAUDIO_SINT24 )
9721 else if ( format == RTAUDIO_SINT8 )
9724 errorText_ = "RtApi::formatBytes: undefined format.";
9725 error( RtAudioError::WARNING );
// Populate stream_.convertInfo[mode] for convertBuffer(): direction of
// conversion (device->user for INPUT, user->device for OUTPUT), channel
// count, per-channel offsets for (de)interleaving, and jump strides.
// firstChannel shifts the offsets on the device side of the conversion.
9730 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9732 if ( mode == INPUT ) { // convert device to user buffer
9733 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9734 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9735 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9736 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9738 else { // convert user to device buffer
9739 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9740 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9741 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9742 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Convert only as many channels as the smaller side has.
9745 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9746 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9748 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9750 // Set up the interleave/deinterleave offsets.
// Non-interleaved data is laid out channel-by-channel, so a channel's
// samples are bufferSize apart; interleaved channels are 1 apart with a
// per-frame jump equal to the channel count.
9751 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9752 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9753 ( mode == INPUT && stream_.userInterleaved ) ) {
9754 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9755 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9756 stream_.convertInfo[mode].outOffset.push_back( k );
9757 stream_.convertInfo[mode].inJump = 1;
9761 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9762 stream_.convertInfo[mode].inOffset.push_back( k );
9763 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9764 stream_.convertInfo[mode].outJump = 1;
9768 else { // no (de)interleaving
9769 if ( stream_.userInterleaved ) {
9770 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9771 stream_.convertInfo[mode].inOffset.push_back( k );
9772 stream_.convertInfo[mode].outOffset.push_back( k );
9776 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9777 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9778 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9779 stream_.convertInfo[mode].inJump = 1;
9780 stream_.convertInfo[mode].outJump = 1;
9785 // Add channel offset.
// The firstChannel offset applies to the device-facing side: the output
// buffer for OUTPUT mode, the input buffer for INPUT mode.
9786 if ( firstChannel > 0 ) {
9787 if ( stream_.deviceInterleaved[mode] ) {
9788 if ( mode == OUTPUT ) {
9789 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9790 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9793 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9794 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9798 if ( mode == OUTPUT ) {
9799 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9800 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9803 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9804 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9810 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9812 // This function does format conversion, input/output channel compensation, and
9813 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9814 // the lower three bytes of a 32-bit integer.
9816 // Clear our device buffer when in/out duplex device channels are different
9817 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9818 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9819 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9822 if (info.outFormat == RTAUDIO_FLOAT64) {
9824 Float64 *out = (Float64 *)outBuffer;
9826 if (info.inFormat == RTAUDIO_SINT8) {
9827 signed char *in = (signed char *)inBuffer;
9828 scale = 1.0 / 127.5;
9829 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9830 for (j=0; j<info.channels; j++) {
9831 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9832 out[info.outOffset[j]] += 0.5;
9833 out[info.outOffset[j]] *= scale;
9836 out += info.outJump;
9839 else if (info.inFormat == RTAUDIO_SINT16) {
9840 Int16 *in = (Int16 *)inBuffer;
9841 scale = 1.0 / 32767.5;
9842 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9843 for (j=0; j<info.channels; j++) {
9844 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9845 out[info.outOffset[j]] += 0.5;
9846 out[info.outOffset[j]] *= scale;
9849 out += info.outJump;
9852 else if (info.inFormat == RTAUDIO_SINT24) {
9853 Int24 *in = (Int24 *)inBuffer;
9854 scale = 1.0 / 8388607.5;
9855 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9856 for (j=0; j<info.channels; j++) {
9857 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9858 out[info.outOffset[j]] += 0.5;
9859 out[info.outOffset[j]] *= scale;
9862 out += info.outJump;
9865 else if (info.inFormat == RTAUDIO_SINT32) {
9866 Int32 *in = (Int32 *)inBuffer;
9867 scale = 1.0 / 2147483647.5;
9868 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9869 for (j=0; j<info.channels; j++) {
9870 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9871 out[info.outOffset[j]] += 0.5;
9872 out[info.outOffset[j]] *= scale;
9875 out += info.outJump;
9878 else if (info.inFormat == RTAUDIO_FLOAT32) {
9879 Float32 *in = (Float32 *)inBuffer;
9880 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9881 for (j=0; j<info.channels; j++) {
9882 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9885 out += info.outJump;
9888 else if (info.inFormat == RTAUDIO_FLOAT64) {
9889 // Channel compensation and/or (de)interleaving only.
9890 Float64 *in = (Float64 *)inBuffer;
9891 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9892 for (j=0; j<info.channels; j++) {
9893 out[info.outOffset[j]] = in[info.inOffset[j]];
9896 out += info.outJump;
9900 else if (info.outFormat == RTAUDIO_FLOAT32) {
9902 Float32 *out = (Float32 *)outBuffer;
9904 if (info.inFormat == RTAUDIO_SINT8) {
9905 signed char *in = (signed char *)inBuffer;
9906 scale = (Float32) ( 1.0 / 127.5 );
9907 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9908 for (j=0; j<info.channels; j++) {
9909 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9910 out[info.outOffset[j]] += 0.5;
9911 out[info.outOffset[j]] *= scale;
9914 out += info.outJump;
9917 else if (info.inFormat == RTAUDIO_SINT16) {
9918 Int16 *in = (Int16 *)inBuffer;
9919 scale = (Float32) ( 1.0 / 32767.5 );
9920 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9921 for (j=0; j<info.channels; j++) {
9922 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9923 out[info.outOffset[j]] += 0.5;
9924 out[info.outOffset[j]] *= scale;
9927 out += info.outJump;
9930 else if (info.inFormat == RTAUDIO_SINT24) {
9931 Int24 *in = (Int24 *)inBuffer;
9932 scale = (Float32) ( 1.0 / 8388607.5 );
9933 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9934 for (j=0; j<info.channels; j++) {
9935 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9936 out[info.outOffset[j]] += 0.5;
9937 out[info.outOffset[j]] *= scale;
9940 out += info.outJump;
9943 else if (info.inFormat == RTAUDIO_SINT32) {
9944 Int32 *in = (Int32 *)inBuffer;
9945 scale = (Float32) ( 1.0 / 2147483647.5 );
9946 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9947 for (j=0; j<info.channels; j++) {
9948 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9949 out[info.outOffset[j]] += 0.5;
9950 out[info.outOffset[j]] *= scale;
9953 out += info.outJump;
9956 else if (info.inFormat == RTAUDIO_FLOAT32) {
9957 // Channel compensation and/or (de)interleaving only.
9958 Float32 *in = (Float32 *)inBuffer;
9959 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9960 for (j=0; j<info.channels; j++) {
9961 out[info.outOffset[j]] = in[info.inOffset[j]];
9964 out += info.outJump;
9967 else if (info.inFormat == RTAUDIO_FLOAT64) {
9968 Float64 *in = (Float64 *)inBuffer;
9969 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9970 for (j=0; j<info.channels; j++) {
9971 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9974 out += info.outJump;
9978 else if (info.outFormat == RTAUDIO_SINT32) {
9979 Int32 *out = (Int32 *)outBuffer;
9980 if (info.inFormat == RTAUDIO_SINT8) {
9981 signed char *in = (signed char *)inBuffer;
9982 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9983 for (j=0; j<info.channels; j++) {
9984 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9985 out[info.outOffset[j]] <<= 24;
9988 out += info.outJump;
9991 else if (info.inFormat == RTAUDIO_SINT16) {
9992 Int16 *in = (Int16 *)inBuffer;
9993 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9994 for (j=0; j<info.channels; j++) {
9995 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9996 out[info.outOffset[j]] <<= 16;
9999 out += info.outJump;
10002 else if (info.inFormat == RTAUDIO_SINT24) {
10003 Int24 *in = (Int24 *)inBuffer;
10004 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10005 for (j=0; j<info.channels; j++) {
10006 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10007 out[info.outOffset[j]] <<= 8;
10010 out += info.outJump;
10013 else if (info.inFormat == RTAUDIO_SINT32) {
10014 // Channel compensation and/or (de)interleaving only.
10015 Int32 *in = (Int32 *)inBuffer;
10016 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10017 for (j=0; j<info.channels; j++) {
10018 out[info.outOffset[j]] = in[info.inOffset[j]];
10021 out += info.outJump;
10024 else if (info.inFormat == RTAUDIO_FLOAT32) {
10025 Float32 *in = (Float32 *)inBuffer;
10026 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10027 for (j=0; j<info.channels; j++) {
10028 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10031 out += info.outJump;
10034 else if (info.inFormat == RTAUDIO_FLOAT64) {
10035 Float64 *in = (Float64 *)inBuffer;
10036 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10037 for (j=0; j<info.channels; j++) {
10038 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10041 out += info.outJump;
10045 else if (info.outFormat == RTAUDIO_SINT24) {
10046 Int24 *out = (Int24 *)outBuffer;
10047 if (info.inFormat == RTAUDIO_SINT8) {
10048 signed char *in = (signed char *)inBuffer;
10049 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10050 for (j=0; j<info.channels; j++) {
10051 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10052 //out[info.outOffset[j]] <<= 16;
10055 out += info.outJump;
10058 else if (info.inFormat == RTAUDIO_SINT16) {
10059 Int16 *in = (Int16 *)inBuffer;
10060 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10061 for (j=0; j<info.channels; j++) {
10062 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10063 //out[info.outOffset[j]] <<= 8;
10066 out += info.outJump;
10069 else if (info.inFormat == RTAUDIO_SINT24) {
10070 // Channel compensation and/or (de)interleaving only.
10071 Int24 *in = (Int24 *)inBuffer;
10072 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10073 for (j=0; j<info.channels; j++) {
10074 out[info.outOffset[j]] = in[info.inOffset[j]];
10077 out += info.outJump;
10080 else if (info.inFormat == RTAUDIO_SINT32) {
10081 Int32 *in = (Int32 *)inBuffer;
10082 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10083 for (j=0; j<info.channels; j++) {
10084 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10085 //out[info.outOffset[j]] >>= 8;
10088 out += info.outJump;
10091 else if (info.inFormat == RTAUDIO_FLOAT32) {
10092 Float32 *in = (Float32 *)inBuffer;
10093 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10094 for (j=0; j<info.channels; j++) {
10095 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10098 out += info.outJump;
10101 else if (info.inFormat == RTAUDIO_FLOAT64) {
10102 Float64 *in = (Float64 *)inBuffer;
10103 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10104 for (j=0; j<info.channels; j++) {
10105 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10108 out += info.outJump;
10112 else if (info.outFormat == RTAUDIO_SINT16) {
10113 Int16 *out = (Int16 *)outBuffer;
10114 if (info.inFormat == RTAUDIO_SINT8) {
10115 signed char *in = (signed char *)inBuffer;
10116 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10117 for (j=0; j<info.channels; j++) {
10118 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10119 out[info.outOffset[j]] <<= 8;
10122 out += info.outJump;
10125 else if (info.inFormat == RTAUDIO_SINT16) {
10126 // Channel compensation and/or (de)interleaving only.
10127 Int16 *in = (Int16 *)inBuffer;
10128 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10129 for (j=0; j<info.channels; j++) {
10130 out[info.outOffset[j]] = in[info.inOffset[j]];
10133 out += info.outJump;
10136 else if (info.inFormat == RTAUDIO_SINT24) {
10137 Int24 *in = (Int24 *)inBuffer;
10138 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10139 for (j=0; j<info.channels; j++) {
10140 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10143 out += info.outJump;
10146 else if (info.inFormat == RTAUDIO_SINT32) {
10147 Int32 *in = (Int32 *)inBuffer;
10148 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10149 for (j=0; j<info.channels; j++) {
10150 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10153 out += info.outJump;
10156 else if (info.inFormat == RTAUDIO_FLOAT32) {
10157 Float32 *in = (Float32 *)inBuffer;
10158 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10159 for (j=0; j<info.channels; j++) {
10160 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10163 out += info.outJump;
10166 else if (info.inFormat == RTAUDIO_FLOAT64) {
10167 Float64 *in = (Float64 *)inBuffer;
10168 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10169 for (j=0; j<info.channels; j++) {
10170 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10173 out += info.outJump;
10177 else if (info.outFormat == RTAUDIO_SINT8) {
10178 signed char *out = (signed char *)outBuffer;
10179 if (info.inFormat == RTAUDIO_SINT8) {
10180 // Channel compensation and/or (de)interleaving only.
10181 signed char *in = (signed char *)inBuffer;
10182 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10183 for (j=0; j<info.channels; j++) {
10184 out[info.outOffset[j]] = in[info.inOffset[j]];
10187 out += info.outJump;
10190 if (info.inFormat == RTAUDIO_SINT16) {
10191 Int16 *in = (Int16 *)inBuffer;
10192 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10193 for (j=0; j<info.channels; j++) {
10194 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10197 out += info.outJump;
10200 else if (info.inFormat == RTAUDIO_SINT24) {
10201 Int24 *in = (Int24 *)inBuffer;
10202 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10203 for (j=0; j<info.channels; j++) {
10204 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10207 out += info.outJump;
10210 else if (info.inFormat == RTAUDIO_SINT32) {
10211 Int32 *in = (Int32 *)inBuffer;
10212 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10213 for (j=0; j<info.channels; j++) {
10214 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10217 out += info.outJump;
10220 else if (info.inFormat == RTAUDIO_FLOAT32) {
10221 Float32 *in = (Float32 *)inBuffer;
10222 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10223 for (j=0; j<info.channels; j++) {
10224 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10227 out += info.outJump;
10230 else if (info.inFormat == RTAUDIO_FLOAT64) {
10231 Float64 *in = (Float64 *)inBuffer;
10232 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10233 for (j=0; j<info.channels; j++) {
10234 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10237 out += info.outJump;
10243 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10244 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10245 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10247 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10253 if ( format == RTAUDIO_SINT16 ) {
10254 for ( unsigned int i=0; i<samples; i++ ) {
10255 // Swap 1st and 2nd bytes.
10260 // Increment 2 bytes.
10264 else if ( format == RTAUDIO_SINT32 ||
10265 format == RTAUDIO_FLOAT32 ) {
10266 for ( unsigned int i=0; i<samples; i++ ) {
10267 // Swap 1st and 4th bytes.
10272 // Swap 2nd and 3rd bytes.
10278 // Increment 3 more bytes.
10282 else if ( format == RTAUDIO_SINT24 ) {
10283 for ( unsigned int i=0; i<samples; i++ ) {
10284 // Swap 1st and 3rd bytes.
10289 // Increment 2 more bytes.
10293 else if ( format == RTAUDIO_FLOAT64 ) {
10294 for ( unsigned int i=0; i<samples; i++ ) {
10295 // Swap 1st and 8th bytes
10300 // Swap 2nd and 7th bytes
10306 // Swap 3rd and 6th bytes
10312 // Swap 4th and 5th bytes
10318 // Increment 5 more bytes.
10324 // Indentation settings for Vim and Emacs
10326 // Local Variables:
10327 // c-basic-offset: 2
10328 // indent-tabs-mode: nil
10331 // vim: et sts=2 sw=2