1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
58 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
// On Windows builds, the stream mutex is a Win32 CRITICAL_SECTION.
59 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
60 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
61 #define MUTEX_LOCK(A) EnterCriticalSection(A)
62 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a null-terminated narrow string to a std::string.
// Fixed: guard against a NULL pointer, which would invoke undefined
// behavior in the std::string(const char*) constructor.
static std::string convertCharPointerToStdString( const char *text )
{
  if ( text == NULL ) return std::string();
  return std::string( text );
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
79 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// On POSIX platforms, the stream mutex is a pthread mutex.
81 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
82 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
83 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
84 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// Fallback (dummy API): no locking is needed, so the macros expand to a
// harmless no-op expression.  LOCK/UNLOCK are intentionally undefined here
// since the dummy API never locks.
86 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
87 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_ALSA__)
111 apis.push_back( LINUX_ALSA );
113 #if defined(__LINUX_PULSE__)
114 apis.push_back( LINUX_PULSE );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
136 void RtAudio :: openRtApi( RtAudio::Api api )
142 #if defined(__UNIX_JACK__)
143 if ( api == UNIX_JACK )
144 rtapi_ = new RtApiJack();
146 #if defined(__LINUX_ALSA__)
147 if ( api == LINUX_ALSA )
148 rtapi_ = new RtApiAlsa();
150 #if defined(__LINUX_PULSE__)
151 if ( api == LINUX_PULSE )
152 rtapi_ = new RtApiPulse();
154 #if defined(__LINUX_OSS__)
155 if ( api == LINUX_OSS )
156 rtapi_ = new RtApiOss();
158 #if defined(__WINDOWS_ASIO__)
159 if ( api == WINDOWS_ASIO )
160 rtapi_ = new RtApiAsio();
162 #if defined(__WINDOWS_WASAPI__)
163 if ( api == WINDOWS_WASAPI )
164 rtapi_ = new RtApiWasapi();
166 #if defined(__WINDOWS_DS__)
167 if ( api == WINDOWS_DS )
168 rtapi_ = new RtApiDs();
170 #if defined(__MACOSX_CORE__)
171 if ( api == MACOSX_CORE )
172 rtapi_ = new RtApiCore();
174 #if defined(__RTAUDIO_DUMMY__)
175 if ( api == RTAUDIO_DUMMY )
176 rtapi_ = new RtApiDummy();
180 RtAudio :: RtAudio( RtAudio::Api api )
184 if ( api != UNSPECIFIED ) {
185 // Attempt to open the specified API.
187 if ( rtapi_ ) return;
189 // No compiled support for specified API value. Issue a debug
190 // warning and continue as if no API was specified.
191 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
194 // Iterate through the compiled APIs and return as soon as we find
195 // one with at least one device or we reach the end of the list.
196 std::vector< RtAudio::Api > apis;
197 getCompiledApi( apis );
198 for ( unsigned int i=0; i<apis.size(); i++ ) {
199 openRtApi( apis[i] );
200 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
203 if ( rtapi_ ) return;
205 // It should not be possible to get here because the preprocessor
206 // definition __RTAUDIO_DUMMY__ is automatically defined if no
207 // API-specific definitions are passed to the compiler. But just in
208 // case something weird happens, we'll thow an error.
209 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
210 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
213 RtAudio :: ~RtAudio()
219 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
220 RtAudio::StreamParameters *inputParameters,
221 RtAudioFormat format, unsigned int sampleRate,
222 unsigned int *bufferFrames,
223 RtAudioCallback callback, void *userData,
224 RtAudio::StreamOptions *options,
225 RtAudioErrorCallback errorCallback )
227 return rtapi_->openStream( outputParameters, inputParameters, format,
228 sampleRate, bufferFrames, callback,
229 userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
241 stream_.state = STREAM_CLOSED;
242 stream_.mode = UNINITIALIZED;
243 stream_.apiHandle = 0;
244 stream_.userBuffer[0] = 0;
245 stream_.userBuffer[1] = 0;
246 MUTEX_INITIALIZE( &stream_.mutex );
247 showWarnings_ = true;
248 firstErrorOccurred_ = false;
253 MUTEX_DESTROY( &stream_.mutex );
256 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
257 RtAudio::StreamParameters *iParams,
258 RtAudioFormat format, unsigned int sampleRate,
259 unsigned int *bufferFrames,
260 RtAudioCallback callback, void *userData,
261 RtAudio::StreamOptions *options,
262 RtAudioErrorCallback errorCallback )
264 if ( stream_.state != STREAM_CLOSED ) {
265 errorText_ = "RtApi::openStream: a stream is already open!";
266 error( RtAudioError::INVALID_USE );
270 // Clear stream information potentially left from a previously open stream.
273 if ( oParams && oParams->nChannels < 1 ) {
274 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
275 error( RtAudioError::INVALID_USE );
279 if ( iParams && iParams->nChannels < 1 ) {
280 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
281 error( RtAudioError::INVALID_USE );
285 if ( oParams == NULL && iParams == NULL ) {
286 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
287 error( RtAudioError::INVALID_USE );
291 if ( formatBytes(format) == 0 ) {
292 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
293 error( RtAudioError::INVALID_USE );
297 unsigned int nDevices = getDeviceCount();
298 unsigned int oChannels = 0;
300 oChannels = oParams->nChannels;
301 if ( oParams->deviceId >= nDevices ) {
302 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
303 error( RtAudioError::INVALID_USE );
308 unsigned int iChannels = 0;
310 iChannels = iParams->nChannels;
311 if ( iParams->deviceId >= nDevices ) {
312 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
313 error( RtAudioError::INVALID_USE );
320 if ( oChannels > 0 ) {
322 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
323 sampleRate, format, bufferFrames, options );
324 if ( result == false ) {
325 error( RtAudioError::SYSTEM_ERROR );
330 if ( iChannels > 0 ) {
332 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
333 sampleRate, format, bufferFrames, options );
334 if ( result == false ) {
335 if ( oChannels > 0 ) closeStream();
336 error( RtAudioError::SYSTEM_ERROR );
341 stream_.callbackInfo.callback = (void *) callback;
342 stream_.callbackInfo.userData = userData;
343 stream_.callbackInfo.errorCallback = (void *) errorCallback;
345 if ( options ) options->numberOfBuffers = stream_.nBuffers;
346 stream_.state = STREAM_STOPPED;
349 unsigned int RtApi :: getDefaultInputDevice( void )
351 // Should be implemented in subclasses if possible.
355 unsigned int RtApi :: getDefaultOutputDevice( void )
357 // Should be implemented in subclasses if possible.
361 void RtApi :: closeStream( void )
363 // MUST be implemented in subclasses!
367 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
368 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
369 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
370 RtAudio::StreamOptions * /*options*/ )
372 // MUST be implemented in subclasses!
376 void RtApi :: tickStreamTime( void )
378 // Subclasses that do not provide their own implementation of
379 // getStreamTime should call this function once per buffer I/O to
380 // provide basic stream time support.
382 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
384 #if defined( HAVE_GETTIMEOFDAY )
385 gettimeofday( &stream_.lastTickTimestamp, NULL );
389 long RtApi :: getStreamLatency( void )
393 long totalLatency = 0;
394 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
395 totalLatency = stream_.latency[0];
396 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
397 totalLatency += stream_.latency[1];
402 double RtApi :: getStreamTime( void )
406 #if defined( HAVE_GETTIMEOFDAY )
407 // Return a very accurate estimate of the stream time by
408 // adding in the elapsed time since the last tick.
412 if ( stream_.state != STREAM_RUNNING || (stream_.lastTickTimestamp.tv_sec == 0 && stream_.lastTickTimestamp.tv_usec == 0) )
413 return stream_.streamTime;
415 gettimeofday( &now, NULL );
416 then = stream_.lastTickTimestamp;
417 return stream_.streamTime +
418 ((now.tv_sec + 0.000001 * now.tv_usec) -
419 (then.tv_sec + 0.000001 * then.tv_usec));
421 return stream_.streamTime;
425 void RtApi :: setStreamTime( double time )
430 stream_.streamTime = time;
431 #if defined( HAVE_GETTIMEOFDAY )
432 gettimeofday( &stream_.lastTickTimestamp, NULL );
436 unsigned int RtApi :: getStreamSampleRate( void )
440 return stream_.sampleRate;
443 void RtApi :: startStream( void )
445 #if defined( HAVE_GETTIMEOFDAY )
446 stream_.lastTickTimestamp.tv_sec = 0;
447 stream_.lastTickTimestamp.tv_usec = 0;
452 // *************************************************** //
454 // OS/API-specific methods.
456 // *************************************************** //
458 #if defined(__MACOSX_CORE__)
460 // The OS X CoreAudio API is designed to use a separate callback
461 // procedure for each of its audio devices. A single RtAudio duplex
462 // stream using two different devices is supported here, though it
463 // cannot be guaranteed to always behave correctly because we cannot
464 // synchronize these two callbacks.
466 // A property listener is installed for over/underrun information.
467 // However, no functionality is currently provided to allow property
468 // listeners to trigger user handlers because it is unclear what could
469 // be done if a critical stream parameter (buffer size, sample rate,
470 // device disconnect) notification arrived. The listeners entail
471 // quite a bit of extra code and most likely, a user program wouldn't
472 // be prepared for the result anyway. However, we do provide a flag
473 // to the client callback function to inform of an over/underrun.
475 // A structure to hold various information related to the CoreAudio API
478 AudioDeviceID id[2]; // device ids
479 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
480 AudioDeviceIOProcID procId[2];
482 UInt32 iStream[2]; // device stream index (or first if using multiple)
483 UInt32 nStreams[2]; // number of streams to use
486 pthread_cond_t condition;
487 int drainCounter; // Tracks callback counts when draining
488 bool internalDrain; // Indicates if stop is initiated from callback or not.
491 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
494 RtApiCore:: RtApiCore()
496 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
497 // This is a largely undocumented but absolutely necessary
498 // requirement starting with OS-X 10.6. If not called, queries and
499 // updates to various audio device properties are not handled
501 CFRunLoopRef theRunLoop = NULL;
502 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
503 kAudioObjectPropertyScopeGlobal,
504 kAudioObjectPropertyElementMaster };
505 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
506 if ( result != noErr ) {
507 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
508 error( RtAudioError::WARNING );
513 RtApiCore :: ~RtApiCore()
515 // The subclass destructor gets called before the base class
516 // destructor, so close an existing stream before deallocating
517 // apiDeviceId memory.
518 if ( stream_.state != STREAM_CLOSED ) closeStream();
521 unsigned int RtApiCore :: getDeviceCount( void )
523 // Find out how many audio devices there are, if any.
525 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
526 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
527 if ( result != noErr ) {
528 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
529 error( RtAudioError::WARNING );
533 return dataSize / sizeof( AudioDeviceID );
536 unsigned int RtApiCore :: getDefaultInputDevice( void )
538 unsigned int nDevices = getDeviceCount();
539 if ( nDevices <= 1 ) return 0;
542 UInt32 dataSize = sizeof( AudioDeviceID );
543 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
544 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
545 if ( result != noErr ) {
546 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
547 error( RtAudioError::WARNING );
551 dataSize *= nDevices;
552 AudioDeviceID deviceList[ nDevices ];
553 property.mSelector = kAudioHardwarePropertyDevices;
554 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
555 if ( result != noErr ) {
556 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
557 error( RtAudioError::WARNING );
561 for ( unsigned int i=0; i<nDevices; i++ )
562 if ( id == deviceList[i] ) return i;
564 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
565 error( RtAudioError::WARNING );
569 unsigned int RtApiCore :: getDefaultOutputDevice( void )
571 unsigned int nDevices = getDeviceCount();
572 if ( nDevices <= 1 ) return 0;
575 UInt32 dataSize = sizeof( AudioDeviceID );
576 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
577 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
578 if ( result != noErr ) {
579 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
580 error( RtAudioError::WARNING );
584 dataSize = sizeof( AudioDeviceID ) * nDevices;
585 AudioDeviceID deviceList[ nDevices ];
586 property.mSelector = kAudioHardwarePropertyDevices;
587 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
588 if ( result != noErr ) {
589 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
590 error( RtAudioError::WARNING );
594 for ( unsigned int i=0; i<nDevices; i++ )
595 if ( id == deviceList[i] ) return i;
597 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
598 error( RtAudioError::WARNING );
// Probe a CoreAudio device by index and fill an RtAudio::DeviceInfo with its
// name, channel counts, supported sample rates and native format.
// NOTE(review): this listing appears to have lost lines that only closed
// scopes or returned; the code below is kept byte-identical to the listing.
602 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
604 RtAudio::DeviceInfo info;
608 unsigned int nDevices = getDeviceCount();
609 if ( nDevices == 0 ) {
610 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
611 error( RtAudioError::INVALID_USE );
615 if ( device >= nDevices ) {
616 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
617 error( RtAudioError::INVALID_USE );
// NOTE(review): VLA below is a compiler extension, not standard C++.
621 AudioDeviceID deviceList[ nDevices ];
622 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
623 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
624 kAudioObjectPropertyScopeGlobal,
625 kAudioObjectPropertyElementMaster };
626 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
627 0, NULL, &dataSize, (void *) &deviceList );
628 if ( result != noErr ) {
629 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
630 error( RtAudioError::WARNING );
634 AudioDeviceID id = deviceList[ device ];
// Device name is built as "<manufacturer>: <device name>".
636 // Get the device name.
639 dataSize = sizeof( CFStringRef );
640 property.mSelector = kAudioObjectPropertyManufacturer;
641 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
642 if ( result != noErr ) {
643 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
644 errorText_ = errorStream_.str();
645 error( RtAudioError::WARNING );
649 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
650 int length = CFStringGetLength(cfname);
// NOTE(review): malloc result is used without a NULL check; *3+1 allows for
// worst-case UTF-8 expansion.
651 char *mname = (char *)malloc(length * 3 + 1);
652 #if defined( UNICODE ) || defined( _UNICODE )
653 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
655 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
657 info.name.append( (const char *)mname, strlen(mname) );
658 info.name.append( ": " );
662 property.mSelector = kAudioObjectPropertyName;
663 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
664 if ( result != noErr ) {
665 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
666 errorText_ = errorStream_.str();
667 error( RtAudioError::WARNING );
671 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
672 length = CFStringGetLength(cfname);
673 char *name = (char *)malloc(length * 3 + 1);
674 #if defined( UNICODE ) || defined( _UNICODE )
675 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
677 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
679 info.name.append( (const char *)name, strlen(name) );
// Channel counts come from the stream configuration buffer lists.
683 // Get the output stream "configuration".
684 AudioBufferList *bufferList = nil;
685 property.mSelector = kAudioDevicePropertyStreamConfiguration;
686 property.mScope = kAudioDevicePropertyScopeOutput;
687 // property.mElement = kAudioObjectPropertyElementWildcard;
689 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
690 if ( result != noErr || dataSize == 0 ) {
691 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
692 errorText_ = errorStream_.str();
693 error( RtAudioError::WARNING );
697 // Allocate the AudioBufferList.
698 bufferList = (AudioBufferList *) malloc( dataSize );
699 if ( bufferList == NULL ) {
700 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
701 error( RtAudioError::WARNING );
705 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
706 if ( result != noErr || dataSize == 0 ) {
708 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
709 errorText_ = errorStream_.str();
710 error( RtAudioError::WARNING );
714 // Get output channel information.
715 unsigned int i, nStreams = bufferList->mNumberBuffers;
716 for ( i=0; i<nStreams; i++ )
717 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
720 // Get the input stream "configuration".
721 property.mScope = kAudioDevicePropertyScopeInput;
722 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
723 if ( result != noErr || dataSize == 0 ) {
724 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
725 errorText_ = errorStream_.str();
726 error( RtAudioError::WARNING );
730 // Allocate the AudioBufferList.
731 bufferList = (AudioBufferList *) malloc( dataSize );
732 if ( bufferList == NULL ) {
733 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
734 error( RtAudioError::WARNING );
738 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
739 if (result != noErr || dataSize == 0) {
741 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
742 errorText_ = errorStream_.str();
743 error( RtAudioError::WARNING );
747 // Get input channel information.
748 nStreams = bufferList->mNumberBuffers;
749 for ( i=0; i<nStreams; i++ )
750 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
753 // If device opens for both playback and capture, we determine the channels.
754 if ( info.outputChannels > 0 && info.inputChannels > 0 )
755 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
757 // Probe the device sample rates.
758 bool isInput = false;
759 if ( info.outputChannels == 0 ) isInput = true;
761 // Determine the supported sample rates.
762 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
763 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
764 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
765 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
766 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
767 errorText_ = errorStream_.str();
768 error( RtAudioError::WARNING );
772 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
// NOTE(review): another VLA (compiler extension).
773 AudioValueRange rangeList[ nRanges ];
774 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
775 if ( result != kAudioHardwareNoError ) {
776 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
777 errorText_ = errorStream_.str();
778 error( RtAudioError::WARNING );
782 // The sample rate reporting mechanism is a bit of a mystery. It
783 // seems that it can either return individual rates or a range of
784 // rates. I assume that if the min / max range values are the same,
785 // then that represents a single supported rate and if the min / max
786 // range values are different, the device supports an arbitrary
787 // range of values (though there might be multiple ranges, so we'll
788 // use the most conservative range).
789 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
790 bool haveValueRange = false;
791 info.sampleRates.clear();
792 for ( UInt32 i=0; i<nRanges; i++ ) {
793 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
794 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
795 info.sampleRates.push_back( tmpSr );
// Prefer the highest discrete rate that does not exceed 48 kHz.
797 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
798 info.preferredSampleRate = tmpSr;
801 haveValueRange = true;
802 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
803 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For continuous ranges, report the standard rates that fall inside the range.
807 if ( haveValueRange ) {
808 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
809 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
810 info.sampleRates.push_back( SAMPLE_RATES[k] );
812 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
813 info.preferredSampleRate = SAMPLE_RATES[k];
818 // Sort and remove any redundant values
819 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
820 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
822 if ( info.sampleRates.size() == 0 ) {
823 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
824 errorText_ = errorStream_.str();
825 error( RtAudioError::WARNING );
829 // CoreAudio always uses 32-bit floating point data for PCM streams.
830 // Thus, any other "physical" formats supported by the device are of
831 // no interest to the client.
832 info.nativeFormats = RTAUDIO_FLOAT32;
// Flag whether this device is the system default for either direction.
834 if ( info.outputChannels > 0 )
835 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
836 if ( info.inputChannels > 0 )
837 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
843 static OSStatus callbackHandler( AudioDeviceID inDevice,
844 const AudioTimeStamp* /*inNow*/,
845 const AudioBufferList* inInputData,
846 const AudioTimeStamp* /*inInputTime*/,
847 AudioBufferList* outOutputData,
848 const AudioTimeStamp* /*inOutputTime*/,
851 CallbackInfo *info = (CallbackInfo *) infoPointer;
853 RtApiCore *object = (RtApiCore *) info->object;
854 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
855 return kAudioHardwareUnspecifiedError;
857 return kAudioHardwareNoError;
860 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
862 const AudioObjectPropertyAddress properties[],
863 void* handlePointer )
865 CoreHandle *handle = (CoreHandle *) handlePointer;
866 for ( UInt32 i=0; i<nAddresses; i++ ) {
867 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
868 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
869 handle->xrun[1] = true;
871 handle->xrun[0] = true;
875 return kAudioHardwareNoError;
878 static OSStatus rateListener( AudioObjectID inDevice,
879 UInt32 /*nAddresses*/,
880 const AudioObjectPropertyAddress /*properties*/[],
883 Float64 *rate = (Float64 *) ratePointer;
884 UInt32 dataSize = sizeof( Float64 );
885 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
886 kAudioObjectPropertyScopeGlobal,
887 kAudioObjectPropertyElementMaster };
888 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
889 return kAudioHardwareNoError;
892 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
893 unsigned int firstChannel, unsigned int sampleRate,
894 RtAudioFormat format, unsigned int *bufferSize,
895 RtAudio::StreamOptions *options )
898 unsigned int nDevices = getDeviceCount();
899 if ( nDevices == 0 ) {
900 // This should not happen because a check is made before this function is called.
901 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
905 if ( device >= nDevices ) {
906 // This should not happen because a check is made before this function is called.
907 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
911 AudioDeviceID deviceList[ nDevices ];
912 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
913 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
914 kAudioObjectPropertyScopeGlobal,
915 kAudioObjectPropertyElementMaster };
916 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
917 0, NULL, &dataSize, (void *) &deviceList );
918 if ( result != noErr ) {
919 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
923 AudioDeviceID id = deviceList[ device ];
925 // Setup for stream mode.
// NOTE(review): this span is the device-configuration portion of
// RtApiCore::probeDeviceOpen() — the signature and initial setup occur
// earlier in the file. It selects the CoreAudio stream(s) covering the
// requested channels, sets buffer size / sample rate / virtual and
// physical formats, then fills in the stream_ bookkeeping and installs
// the IOProc callback.
926 bool isInput = false;
927 if ( mode == INPUT ) {
929 property.mScope = kAudioDevicePropertyScopeInput;
932 property.mScope = kAudioDevicePropertyScopeOutput;
934 // Get the stream "configuration".
935 AudioBufferList *bufferList = nil;
937 property.mSelector = kAudioDevicePropertyStreamConfiguration;
938 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
939 if ( result != noErr || dataSize == 0 ) {
940 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
941 errorText_ = errorStream_.str();
945 // Allocate the AudioBufferList.
946 bufferList = (AudioBufferList *) malloc( dataSize );
947 if ( bufferList == NULL ) {
948 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
952 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
953 if (result != noErr || dataSize == 0) {
955 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
956 errorText_ = errorStream_.str();
960 // Search for one or more streams that contain the desired number of
961 // channels. CoreAudio devices can have an arbitrary number of
962 // streams and each stream can have an arbitrary number of channels.
963 // For each stream, a single buffer of interleaved samples is
964 // provided. RtAudio prefers the use of one stream of interleaved
965 // data or multiple consecutive single-channel streams. However, we
966 // now support multiple consecutive multi-channel streams of
967 // interleaved data as well.
// offsetCounter tracks how many channels remain to be skipped before the
// first requested channel; it starts at the caller-supplied firstChannel.
968 UInt32 iStream, offsetCounter = firstChannel;
969 UInt32 nStreams = bufferList->mNumberBuffers;
970 bool monoMode = false;
971 bool foundStream = false;
973 // First check that the device supports the requested number of
975 UInt32 deviceChannels = 0;
976 for ( iStream=0; iStream<nStreams; iStream++ )
977 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
979 if ( deviceChannels < ( channels + firstChannel ) ) {
981 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
982 errorText_ = errorStream_.str();
986 // Look for a single stream meeting our needs.
987 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
988 for ( iStream=0; iStream<nStreams; iStream++ ) {
989 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
990 if ( streamChannels >= channels + offsetCounter ) {
991 firstStream = iStream;
992 channelOffset = offsetCounter;
996 if ( streamChannels > offsetCounter ) break;
997 offsetCounter -= streamChannels;
1000 // If we didn't find a single stream above, then we should be able
1001 // to meet the channel specification with multiple streams.
1002 if ( foundStream == false ) {
1004 offsetCounter = firstChannel;
// Skip whole streams until the one containing the first requested channel.
1005 for ( iStream=0; iStream<nStreams; iStream++ ) {
1006 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1007 if ( streamChannels > offsetCounter ) break;
1008 offsetCounter -= streamChannels;
1011 firstStream = iStream;
1012 channelOffset = offsetCounter;
1013 Int32 channelCounter = channels + offsetCounter - streamChannels;
1015 if ( streamChannels > 1 ) monoMode = false;
// Consume additional consecutive streams until the channel request is met.
1016 while ( channelCounter > 0 ) {
1017 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1018 if ( streamChannels > 1 ) monoMode = false;
1019 channelCounter -= streamChannels;
1026 // Determine the buffer size.
1027 AudioValueRange bufferRange;
1028 dataSize = sizeof( AudioValueRange );
1029 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1030 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1032 if ( result != noErr ) {
1033 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1034 errorText_ = errorStream_.str();
// Clamp the requested buffer size to the device-supported range.
1038 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1039 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1040 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1042 // Set the buffer size. For multiple streams, I'm assuming we only
1043 // need to make this setting for the master channel.
1044 UInt32 theSize = (UInt32) *bufferSize;
1045 dataSize = sizeof( UInt32 );
1046 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1047 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1049 if ( result != noErr ) {
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1051 errorText_ = errorStream_.str();
1055 // If attempting to setup a duplex stream, the bufferSize parameter
1056 // MUST be the same in both directions!
1057 *bufferSize = theSize;
1058 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1059 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1060 errorText_ = errorStream_.str();
1064 stream_.bufferSize = *bufferSize;
1065 stream_.nBuffers = 1;
1067 // Try to set "hog" mode ... it's not clear to me this is working.
1068 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1070 dataSize = sizeof( hog_pid );
1071 property.mSelector = kAudioDevicePropertyHogMode;
1072 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1073 if ( result != noErr ) {
1074 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1075 errorText_ = errorStream_.str();
// Only claim the device if some other process (or none) currently hogs it.
1079 if ( hog_pid != getpid() ) {
1081 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1082 if ( result != noErr ) {
1083 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1084 errorText_ = errorStream_.str();
1090 // Check and if necessary, change the sample rate for the device.
1091 Float64 nominalRate;
1092 dataSize = sizeof( Float64 );
1093 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1094 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1095 if ( result != noErr ) {
1096 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1097 errorText_ = errorStream_.str();
1101 // Only change the sample rate if off by more than 1 Hz.
1102 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1104 // Set a property listener for the sample rate change
1105 Float64 reportedRate = 0.0;
1106 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1107 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1108 if ( result != noErr ) {
1109 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1110 errorText_ = errorStream_.str();
1114 nominalRate = (Float64) sampleRate;
1115 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1116 if ( result != noErr ) {
1117 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1118 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1119 errorText_ = errorStream_.str();
1123 // Now wait until the reported nominal rate is what we just set.
// Poll until rateListener (which writes into reportedRate) observes the new
// rate. The counter advances 5000 per iteration with a 5,000,000 cap, i.e.
// ~5 s total assuming a 5 ms sleep per poll — presumably a usleep(5000)
// occurs inside this loop in code elided from this excerpt; TODO confirm.
1124 UInt32 microCounter = 0;
1125 while ( reportedRate != nominalRate ) {
1126 microCounter += 5000;
1127 if ( microCounter > 5000000 ) break;
1131 // Remove the property listener.
1132 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1134 if ( microCounter > 5000000 ) {
1135 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1136 errorText_ = errorStream_.str();
1141 // Now set the stream format for all streams. Also, check the
1142 // physical format of the device and change that if necessary.
1143 AudioStreamBasicDescription description;
1144 dataSize = sizeof( AudioStreamBasicDescription );
1145 property.mSelector = kAudioStreamPropertyVirtualFormat;
1146 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1147 if ( result != noErr ) {
1148 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1149 errorText_ = errorStream_.str();
1153 // Set the sample rate and data format id. However, only make the
1154 // change if the sample rate is not within 1.0 of the desired
1155 // rate and the format is not linear pcm.
1156 bool updateFormat = false;
1157 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1158 description.mSampleRate = (Float64) sampleRate;
1159 updateFormat = true;
1162 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1163 description.mFormatID = kAudioFormatLinearPCM;
1164 updateFormat = true;
1167 if ( updateFormat ) {
1168 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1169 if ( result != noErr ) {
1170 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1171 errorText_ = errorStream_.str();
1176 // Now check the physical format.
1177 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1178 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1179 if ( result != noErr ) {
1180 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1181 errorText_ = errorStream_.str();
1185 //std::cout << "Current physical stream format:" << std::endl;
1186 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1187 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1188 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1189 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
1191 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1192 description.mFormatID = kAudioFormatLinearPCM;
1193 //description.mSampleRate = (Float64) sampleRate;
1194 AudioStreamBasicDescription testDescription = description;
1197 // We'll try higher bit rates first and then work our way down.
// Candidate (bitsPerChannel, formatFlags) pairs. The first element is a
// Float32 so the fractional values 24.2 / 24.4 can distinguish the two
// unpacked 24-bit-in-4-bytes variants; it is truncated to a UInt32 on
// insertion into the vector and when applied below.
1198 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1199 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1200 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1201 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1203 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1204 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1205 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1206 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1207 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1208 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1209 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1210 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1212 bool setPhysicalFormat = false;
1213 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1214 testDescription = description;
1215 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1216 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' is bitwise NOT, so this test is non-zero (true) for
// every 24-bit entry regardless of the packed flag — '!' was likely the
// intent; as written, the packed 24-bit candidate also gets a 4-byte frame.
1217 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1218 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1220 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1221 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1222 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1223 if ( result == noErr ) {
1224 setPhysicalFormat = true;
1225 //std::cout << "Updated physical stream format:" << std::endl;
1226 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1227 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1228 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1229 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1234 if ( !setPhysicalFormat ) {
1235 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1236 errorText_ = errorStream_.str();
1239 } // done setting virtual/physical formats.
1241 // Get the stream / device latency.
1243 dataSize = sizeof( UInt32 );
1244 property.mSelector = kAudioDevicePropertyLatency;
1245 if ( AudioObjectHasProperty( id, &property ) == true ) {
1246 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1247 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1249 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1250 errorText_ = errorStream_.str();
1251 error( RtAudioError::WARNING );
1255 // Byte-swapping: According to AudioHardware.h, the stream data will
1256 // always be presented in native-endian format, so we should never
1257 // need to byte swap.
1258 stream_.doByteSwap[mode] = false;
1260 // From the CoreAudio documentation, PCM data must be supplied as
1262 stream_.userFormat = format;
1263 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1265 if ( streamCount == 1 )
1266 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1267 else // multiple streams
1268 stream_.nDeviceChannels[mode] = channels;
1269 stream_.nUserChannels[mode] = channels;
1270 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1271 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1272 else stream_.userInterleaved = true;
1273 stream_.deviceInterleaved[mode] = true;
1274 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1276 // Set flags for buffer conversion.
1277 stream_.doConvertBuffer[mode] = false;
1278 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1279 stream_.doConvertBuffer[mode] = true;
1280 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1281 stream_.doConvertBuffer[mode] = true;
1282 if ( streamCount == 1 ) {
1283 if ( stream_.nUserChannels[mode] > 1 &&
1284 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1285 stream_.doConvertBuffer[mode] = true;
1287 else if ( monoMode && stream_.userInterleaved )
1288 stream_.doConvertBuffer[mode] = true;
1290 // Allocate our CoreHandle structure for the stream.
1291 CoreHandle *handle = 0;
1292 if ( stream_.apiHandle == 0 ) {
1294 handle = new CoreHandle;
1296 catch ( std::bad_alloc& ) {
1297 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1301 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1302 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1305 stream_.apiHandle = (void *) handle;
1308 handle = (CoreHandle *) stream_.apiHandle;
1309 handle->iStream[mode] = firstStream;
1310 handle->nStreams[mode] = streamCount;
1311 handle->id[mode] = id;
1313 // Allocate necessary internal buffers.
1314 unsigned long bufferBytes;
1315 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1316 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): the NULL check below happens AFTER memset(), so a failed
// malloc would be dereferenced by memset before the error is reported.
1317 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1318 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1319 if ( stream_.userBuffer[mode] == NULL ) {
1320 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1324 // If possible, we will make use of the CoreAudio stream buffers as
1325 // "device buffers".  However, we can't do this if using multiple
1327 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1329 bool makeBuffer = true;
1330 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1331 if ( mode == INPUT ) {
// In duplex mode, reuse the output-side device buffer when it is already
// at least as large as the input side needs.
1332 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1333 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1334 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1339 bufferBytes *= *bufferSize;
1340 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1341 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1342 if ( stream_.deviceBuffer == NULL ) {
1343 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1349 stream_.sampleRate = sampleRate;
1350 stream_.device[mode] = device;
1351 stream_.state = STREAM_STOPPED;
1352 stream_.callbackInfo.object = (void *) this;
1354 // Setup the buffer conversion information structure.
1355 if ( stream_.doConvertBuffer[mode] ) {
1356 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1357 else setConvertInfo( mode, channelOffset );
1360 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1361 // Only one callback procedure per device.
1362 stream_.mode = DUPLEX;
1364 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1365 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1367 // deprecated in favor of AudioDeviceCreateIOProcID()
1368 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1370 if ( result != noErr ) {
1371 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1372 errorText_ = errorStream_.str();
1375 if ( stream_.mode == OUTPUT && mode == INPUT )
1376 stream_.mode = DUPLEX;
1378 stream_.mode = mode;
1381 // Setup the device property listener for over/underload.
1382 property.mSelector = kAudioDeviceProcessorOverload;
1383 property.mScope = kAudioObjectPropertyScopeGlobal;
1384 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error/cleanup path: release everything allocated above before returning.
1390 pthread_cond_destroy( &handle->condition );
1392 stream_.apiHandle = 0;
1395 for ( int i=0; i<2; i++ ) {
1396 if ( stream_.userBuffer[i] ) {
1397 free( stream_.userBuffer[i] );
1398 stream_.userBuffer[i] = 0;
1402 if ( stream_.deviceBuffer ) {
1403 free( stream_.deviceBuffer );
1404 stream_.deviceBuffer = 0;
1407 stream_.state = STREAM_CLOSED;
// Close the open stream: remove the processor-overload (xrun) property
// listeners and IOProcs for the output device (handle->id[0]) and, when
// distinct, the input device (handle->id[1]); free user/device buffers;
// destroy the pthread condition variable; and reset the stream state to
// UNINITIALIZED / STREAM_CLOSED. Emits a WARNING if no stream is open.
1411 void RtApiCore :: closeStream( void )
1413 if ( stream_.state == STREAM_CLOSED ) {
1414 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1415 error( RtAudioError::WARNING );
1419 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output side (index 0 of the handle's per-mode arrays).
1420 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1422 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1423 kAudioObjectPropertyScopeGlobal,
1424 kAudioObjectPropertyElementMaster };
// The selector/scope are overwritten to target the overload listener.
1426 property.mSelector = kAudioDeviceProcessorOverload;
1427 property.mScope = kAudioObjectPropertyScopeGlobal;
1428 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1429 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1430 error( RtAudioError::WARNING );
1433 if ( stream_.state == STREAM_RUNNING )
1434 AudioDeviceStop( handle->id[0], callbackHandler );
1435 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1436 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1438 // deprecated in favor of AudioDeviceDestroyIOProcID()
1439 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input side, but only when it is a different device than
// the output side (a shared duplex device was already handled above).
1443 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1445 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1446 kAudioObjectPropertyScopeGlobal,
1447 kAudioObjectPropertyElementMaster };
1449 property.mSelector = kAudioDeviceProcessorOverload;
1450 property.mScope = kAudioObjectPropertyScopeGlobal;
1451 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1452 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1453 error( RtAudioError::WARNING );
1456 if ( stream_.state == STREAM_RUNNING )
1457 AudioDeviceStop( handle->id[1], callbackHandler );
1458 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1459 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1461 // deprecated in favor of AudioDeviceDestroyIOProcID()
1462 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-mode user buffers and the shared device buffer.
1466 for ( int i=0; i<2; i++ ) {
1467 if ( stream_.userBuffer[i] ) {
1468 free( stream_.userBuffer[i] );
1469 stream_.userBuffer[i] = 0;
1473 if ( stream_.deviceBuffer ) {
1474 free( stream_.deviceBuffer );
1475 stream_.deviceBuffer = 0;
1478 // Destroy pthread condition variable.
1479 pthread_cond_destroy( &handle->condition );
1481 stream_.apiHandle = 0;
1483 stream_.mode = UNINITIALIZED;
1484 stream_.state = STREAM_CLOSED;
// Start the stream: call AudioDeviceStart() on the output device and, for
// duplex streams with distinct devices, on the input device as well. Resets
// the drain bookkeeping and marks the stream STREAM_RUNNING. On any device
// start failure, falls through to error( SYSTEM_ERROR ) with the message
// built in errorStream_.
1487 void RtApiCore :: startStream( void )
1490 RtApi::startStream();
1491 if ( stream_.state == STREAM_RUNNING ) {
1492 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1493 error( RtAudioError::WARNING );
1497 OSStatus result = noErr;
1498 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1499 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1501 result = AudioDeviceStart( handle->id[0], callbackHandler );
1502 if ( result != noErr ) {
1503 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1504 errorText_ = errorStream_.str();
// Start the input device only when it differs from the output device; a
// shared duplex device uses a single callback procedure.
1509 if ( stream_.mode == INPUT ||
1510 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1512 result = AudioDeviceStart( handle->id[1], callbackHandler );
1513 if ( result != noErr ) {
1514 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1515 errorText_ = errorStream_.str();
// Reset drain state so a previous stop/drain cycle cannot leak into this run.
1520 handle->drainCounter = 0;
1521 handle->internalDrain = false;
1522 stream_.state = STREAM_RUNNING;
1525 if ( result == noErr ) return;
1526 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream. For output/duplex, first lets the callback drain the
// output (drainCounter = 2, then block on the condition variable until the
// callback signals completion), then calls AudioDeviceStop() on the output
// device and, for duplex with distinct devices, on the input device. On any
// failure, falls through to error( SYSTEM_ERROR ).
1529 void RtApiCore :: stopStream( void )
1532 if ( stream_.state == STREAM_STOPPED ) {
1533 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1534 error( RtAudioError::WARNING );
1538 OSStatus result = noErr;
1539 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1540 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is in progress yet: request one and wait
// for callbackEvent() to pthread_cond_signal() when the output has drained.
1542 if ( handle->drainCounter == 0 ) {
1543 handle->drainCounter = 2;
1544 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1547 result = AudioDeviceStop( handle->id[0], callbackHandler );
1548 if ( result != noErr ) {
1549 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1550 errorText_ = errorStream_.str();
1555 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1557 result = AudioDeviceStop( handle->id[1], callbackHandler );
1558 if ( result != noErr ) {
1559 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1560 errorText_ = errorStream_.str();
1565 stream_.state = STREAM_STOPPED;
1568 if ( result == noErr ) return;
1569 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: set drainCounter to 2 so the callback treats the output
// as draining immediately (zeros are written rather than user data) —
// presumably followed by a stopStream() call in code not shown in this
// excerpt; TODO confirm against the canonical source.
1572 void RtApiCore :: abortStream( void )
1575 if ( stream_.state == STREAM_STOPPED ) {
1576 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1577 error( RtAudioError::WARNING );
1581 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1582 handle->drainCounter = 2;
1587 // This function will be called by a spawned thread when the user
1588 // callback function signals that the stream should be stopped or
1589 // aborted. It is better to handle it this way because the
1590 // callbackEvent() function probably should return before the AudioDeviceStop()
1591 // function is called.
// Thread entry point spawned by callbackEvent() when an internal drain
// completes: recovers the RtApiCore object from the CallbackInfo and calls
// stopStream() outside the audio callback, then exits the thread.
1592 static void *coreStopStream( void *ptr )
1594 CallbackInfo *info = (CallbackInfo *) ptr;
1595 RtApiCore *object = (RtApiCore *) info->object;
1597 object->stopStream();
1598 pthread_exit( NULL );
// Per-buffer render/capture handler, invoked (via callbackHandler) once per
// device per cycle; for duplex streams on two distinct devices it is entered
// once for each. Drives the user callback, fills the CoreAudio output
// stream buffer(s) from the user/device buffer, and copies the input stream
// buffer(s) back to the user side, handling the drain/stop handshake.
1601 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1602 const AudioBufferList *inBufferList,
1603 const AudioBufferList *outBufferList )
1605 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1606 if ( stream_.state == STREAM_CLOSED ) {
1607 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1608 error( RtAudioError::WARNING );
1612 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1613 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1615 // Check if we were draining the stream and signal is finished.
// drainCounter > 3 means the zero-filled drain buffers have flushed: either
// spawn coreStopStream (callback-initiated drain) or wake the external
// stopStream() caller blocked on the condition variable.
1616 if ( handle->drainCounter > 3 ) {
1617 ThreadHandle threadId;
1619 stream_.state = STREAM_STOPPING;
1620 if ( handle->internalDrain == true )
1621 pthread_create( &threadId, NULL, coreStopStream, info );
1622 else // external call to stopStream()
1623 pthread_cond_signal( &handle->condition );
1627 AudioDeviceID outputDevice = handle->id[0];
1629 // Invoke user callback to get fresh output data UNLESS we are
1630 // draining stream or duplex mode AND the input/output devices are
1631 // different AND this function is called for the input device.
1632 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1633 RtAudioCallback callback = (RtAudioCallback) info->callback;
1634 double streamTime = getStreamTime();
1635 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags set by the xrunListener since last cycle.
1636 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1637 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1638 handle->xrun[0] = false;
1640 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1641 status |= RTAUDIO_INPUT_OVERFLOW;
1642 handle->xrun[1] = false;
// User callback return protocol: 2 = abort now (drain without internal
// stop thread), 1 = stop after draining output (internalDrain).
1645 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1646 stream_.bufferSize, streamTime, status, info->userData );
1647 if ( cbReturnValue == 2 ) {
1648 stream_.state = STREAM_STOPPING;
1649 handle->drainCounter = 2;
1653 else if ( cbReturnValue == 1 ) {
1654 handle->drainCounter = 1;
1655 handle->internalDrain = true;
// ---- Output side: fill the CoreAudio stream buffer(s). ----
1659 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1661 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1663 if ( handle->nStreams[0] == 1 ) {
1664 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1666 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1668 else { // fill multiple streams with zeros
1669 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1670 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1672 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1676 else if ( handle->nStreams[0] == 1 ) {
1677 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1678 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1679 stream_.userBuffer[0], stream_.convertInfo[0] );
1681 else { // copy from user buffer
1682 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1683 stream_.userBuffer[0],
1684 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1687 else { // fill multiple streams
1688 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1689 if ( stream_.doConvertBuffer[0] ) {
1690 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1691 inBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: each user channel goes to its own single-channel stream buffer.
1694 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1695 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1696 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1697 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1698 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1701 else { // fill multiple multi-channel streams with interleaved data
1702 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1705 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1706 UInt32 inChannels = stream_.nUserChannels[0];
1707 if ( stream_.doConvertBuffer[0] ) {
1708 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1709 inChannels = stream_.nDeviceChannels[0];
// inOffset is the distance between consecutive channels of one frame in
// the source: 1 when interleaved, bufferSize when planar.
1712 if ( inInterleaved ) inOffset = 1;
1713 else inOffset = stream_.bufferSize;
1715 channelsLeft = inChannels;
1716 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1718 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1719 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1722 // Account for possible channel offset in first stream
1723 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1724 streamChannels -= stream_.channelOffset[0];
1725 outJump = stream_.channelOffset[0];
1729 // Account for possible unfilled channels at end of the last stream
1730 if ( streamChannels > channelsLeft ) {
1731 outJump = streamChannels - channelsLeft;
1732 streamChannels = channelsLeft;
1735 // Determine input buffer offsets and skips
1736 if ( inInterleaved ) {
1737 inJump = inChannels;
1738 in += inChannels - channelsLeft;
1742 in += (inChannels - channelsLeft) * inOffset;
// Copy frame-by-frame, channel-by-channel into this stream's buffer.
1745 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1746 for ( unsigned int j=0; j<streamChannels; j++ ) {
1747 *out++ = in[j*inOffset];
1752 channelsLeft -= streamChannels;
1758 // Don't bother draining input
// Keep advancing the counter so the > 3 completion check above fires.
1759 if ( handle->drainCounter ) {
1760 handle->drainCounter++;
// ---- Input side: copy the CoreAudio capture buffer(s) to the user. ----
1764 AudioDeviceID inputDevice;
1765 inputDevice = handle->id[1];
1766 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1768 if ( handle->nStreams[1] == 1 ) {
1769 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1770 convertBuffer( stream_.userBuffer[1],
1771 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1772 stream_.convertInfo[1] );
1774 else { // copy to user buffer
1775 memcpy( stream_.userBuffer[1],
1776 inBufferList->mBuffers[handle->iStream[1]].mData,
1777 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1780 else { // read from multiple streams
1781 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1782 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1784 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1785 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1786 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1787 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1788 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1791 else { // read from multiple multi-channel streams
// Mirror of the output interleaving logic above, in the other direction.
1792 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1795 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1796 UInt32 outChannels = stream_.nUserChannels[1];
1797 if ( stream_.doConvertBuffer[1] ) {
1798 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1799 outChannels = stream_.nDeviceChannels[1];
1802 if ( outInterleaved ) outOffset = 1;
1803 else outOffset = stream_.bufferSize;
1805 channelsLeft = outChannels;
1806 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1808 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1809 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1812 // Account for possible channel offset in first stream
1813 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1814 streamChannels -= stream_.channelOffset[1];
1815 inJump = stream_.channelOffset[1];
1819 // Account for possible unread channels at end of the last stream
1820 if ( streamChannels > channelsLeft ) {
1821 inJump = streamChannels - channelsLeft;
1822 streamChannels = channelsLeft;
1825 // Determine output buffer offsets and skips
1826 if ( outInterleaved ) {
1827 outJump = outChannels;
1828 out += outChannels - channelsLeft;
1832 out += (outChannels - channelsLeft) * outOffset;
1835 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1836 for ( unsigned int j=0; j<streamChannels; j++ ) {
1837 out[j*outOffset] = *in++;
1842 channelsLeft -= streamChannels;
1846 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1847 convertBuffer( stream_.userBuffer[1],
1848 stream_.deviceBuffer,
1849 stream_.convertInfo[1] );
1855 //MUTEX_UNLOCK( &stream_.mutex );
1857 RtApi::tickStreamTime();
// Translate a CoreAudio OSStatus error code into the name of the
// corresponding kAudioHardware*/kAudioDevice* constant, as a static C string.
// Unrecognized codes fall through to a generic message.
// NOTE(review): partial listing — the opening brace and switch header for
// this function are not visible here.
1861 const char* RtApiCore :: getErrorCode( OSStatus code )
1865 case kAudioHardwareNotRunningError:
1866 return "kAudioHardwareNotRunningError";
1868 case kAudioHardwareUnspecifiedError:
1869 return "kAudioHardwareUnspecifiedError";
1871 case kAudioHardwareUnknownPropertyError:
1872 return "kAudioHardwareUnknownPropertyError";
1874 case kAudioHardwareBadPropertySizeError:
1875 return "kAudioHardwareBadPropertySizeError";
1877 case kAudioHardwareIllegalOperationError:
1878 return "kAudioHardwareIllegalOperationError";
1880 case kAudioHardwareBadObjectError:
1881 return "kAudioHardwareBadObjectError";
1883 case kAudioHardwareBadDeviceError:
1884 return "kAudioHardwareBadDeviceError";
1886 case kAudioHardwareBadStreamError:
1887 return "kAudioHardwareBadStreamError";
1889 case kAudioHardwareUnsupportedOperationError:
1890 return "kAudioHardwareUnsupportedOperationError";
1892 case kAudioDeviceUnsupportedFormatError:
1893 return "kAudioDeviceUnsupportedFormatError";
1895 case kAudioDevicePermissionsError:
1896 return "kAudioDevicePermissionsError";
// Default branch: code not among the known CoreAudio error constants.
1899 return "CoreAudio unknown error";
1903 //******************** End of __MACOSX_CORE__ *********************//
1906 #if defined(__UNIX_JACK__)
1908 // JACK is a low-latency audio server, originally written for the
1909 // GNU/Linux operating system and now also ported to OS-X. It can
1910 // connect a number of different applications to an audio device, as
1911 // well as allowing them to share audio between themselves.
1913 // When using JACK with RtAudio, "devices" refer to JACK clients that
1914 // have ports connected to the server. The JACK server is typically
1915 // started in a terminal as follows:
1917 // .jackd -d alsa -d hw:0
1919 // or through an interface program such as qjackctl. Many of the
1920 // parameters normally set for a stream are fixed by the JACK server
1921 // and can be specified when the JACK server is started. In
1924 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1926 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1927 // frames, and number of buffers = 4. Once the server is running, it
1928 // is not possible to override these values. If the values are not
1929 // specified in the command-line, the JACK server uses default values.
1931 // The JACK server does not have to be running when an instance of
1932 // RtApiJack is created, though the function getDeviceCount() will
1933 // report 0 devices found until JACK has been started. When no
1934 // devices are available (i.e., the JACK server is not running), a
1935 // stream cannot be opened.
1937 #include <jack/jack.h>
1941 // A structure to hold various information related to the Jack API
// implementation. NOTE(review): partial listing — the "struct JackHandle {"
// line and the xrun member declaration are not visible here, but the
// constructor below initializes xrun[0]/xrun[1].
1944 jack_client_t *client;
// One array of registered JACK ports per direction: [0] = playback, [1] = capture.
1945 jack_port_t **ports[2];
// Name of the JACK client acting as the "device" for each direction.
1946 std::string deviceName[2];
// Condition variable used to block stopStream() until draining completes.
1948 pthread_cond_t condition;
1949 int drainCounter; // Tracks callback counts when draining
1950 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default constructor: zero/false-initialize all state.
1953 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
// No-op JACK error handler, installed in non-debug builds to suppress
// JACK's internal error reporting to stderr.
1956 #if !defined(__RTAUDIO_DEBUG__)
1957 static void jackSilentError( const char * ) {};
// Constructor: enable port autoconnection by default and, in non-debug
// builds, silence JACK's internal error messages.
1960 RtApiJack :: RtApiJack()
1961 :shouldAutoconnect_(true) {
1962 // Nothing to do here.
1963 #if !defined(__RTAUDIO_DEBUG__)
1964 // Turn off Jack's internal error reporting.
1965 jack_set_error_function( &jackSilentError );
// Destructor: ensure any open stream is closed before the object goes away.
1969 RtApiJack :: ~RtApiJack()
1971 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by opening a temporary client, listing all ports,
// and counting distinct port-name prefixes (text before the first colon).
// Returns 0 when the JACK server is not running.
1974 unsigned int RtApiJack :: getDeviceCount( void )
1976 // See if we can become a jack client.
1977 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1978 jack_status_t *status = NULL;
1979 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1980 if ( client == 0 ) return 0;
1983 std::string port, previousPort;
1984 unsigned int nChannels = 0, nDevices = 0;
1985 ports = jack_get_ports( client, NULL, NULL, 0 );
1987 // Parse the port names up to the first colon (:).
1990 port = (char *) ports[ nChannels ];
1991 iColon = port.find(":");
1992 if ( iColon != std::string::npos ) {
// Keep the colon in the prefix so comparisons are against "name:".
1993 port = port.substr( 0, iColon + 1 );
// A new prefix means a new device (client); nDevices is bumped in a
// line not visible in this listing.
1994 if ( port != previousPort ) {
1996 previousPort = port;
1999 } while ( ports[++nChannels] );
// Release the temporary probing client before returning.
2003 jack_client_close( client );
// Probe one JACK "device" (client) and fill an RtAudio::DeviceInfo:
// name, channel counts, the server's (single) sample rate, and native
// format. JACK input ports map to RtAudio output channels and vice versa.
2007 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2009 RtAudio::DeviceInfo info;
2010 info.probed = false;
2012 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2013 jack_status_t *status = NULL;
2014 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2015 if ( client == 0 ) {
2016 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2017 error( RtAudioError::WARNING );
2022 std::string port, previousPort;
2023 unsigned int nPorts = 0, nDevices = 0;
2024 ports = jack_get_ports( client, NULL, NULL, 0 );
2026 // Parse the port names up to the first colon (:).
2029 port = (char *) ports[ nPorts ];
2030 iColon = port.find(":");
2031 if ( iColon != std::string::npos ) {
2032 port = port.substr( 0, iColon );
2033 if ( port != previousPort ) {
// The device index selects the nth distinct client-name prefix.
2034 if ( nDevices == device ) info.name = port;
2036 previousPort = port;
2039 } while ( ports[++nPorts] );
// Invalid index: clean up and report (info.probed stays false).
2043 if ( device >= nDevices ) {
2044 jack_client_close( client );
2045 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2046 error( RtAudioError::INVALID_USE );
2050 // Get the current jack server sample rate.
2051 info.sampleRates.clear();
// JACK fixes a single server-wide rate, so it is the only supported rate.
2053 info.preferredSampleRate = jack_get_sample_rate( client );
2054 info.sampleRates.push_back( info.preferredSampleRate );
2056 // Count the available ports containing the client name as device
2057 // channels. Jack "input ports" equal RtAudio output channels.
2058 unsigned int nChannels = 0;
2059 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2061 while ( ports[ nChannels ] ) nChannels++;
2063 info.outputChannels = nChannels;
2066 // Jack "output ports" equal RtAudio input channels.
2068 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2070 while ( ports[ nChannels ] ) nChannels++;
2072 info.inputChannels = nChannels;
2075 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2076 jack_client_close(client);
2077 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2078 error( RtAudioError::WARNING );
2082 // If device opens for both playback and capture, we determine the channels.
2083 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2084 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2086 // Jack always uses 32-bit floats.
2087 info.nativeFormats = RTAUDIO_FLOAT32;
2089 // Jack doesn't provide default devices so we'll use the first available one.
2090 if ( device == 0 && info.outputChannels > 0 )
2091 info.isDefaultOutput = true;
2092 if ( device == 0 && info.inputChannels > 0 )
2093 info.isDefaultInput = true;
2095 jack_client_close(client);
// JACK process callback: forward each audio cycle to the owning RtApiJack
// object. A false return from callbackEvent() maps to a nonzero (error)
// return, which tells JACK to remove this client from the process graph.
2100 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2102 CallbackInfo *info = (CallbackInfo *) infoPointer;
2104 RtApiJack *object = (RtApiJack *) info->object;
2105 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2110 // This function will be called by a spawned thread when the Jack
2111 // server signals that it is shutting down. It is necessary to handle
2112 // it this way because the jackShutdown() function must return before
2113 // the jack_deactivate() function (in closeStream()) will return.
// Thread entry point: closes the stream on behalf of jackShutdown().
2114 static void *jackCloseStream( void *ptr )
2116 CallbackInfo *info = (CallbackInfo *) ptr;
2117 RtApiJack *object = (RtApiJack *) info->object;
2119 object->closeStream();
2121 pthread_exit( NULL );
// JACK shutdown callback: if the stream is still running (i.e., this is a
// real server shutdown rather than our own deactivate), spawn a detachable
// thread to close the stream, since closeStream() cannot be called from here.
2123 static void jackShutdown( void *infoPointer )
2125 CallbackInfo *info = (CallbackInfo *) infoPointer;
2126 RtApiJack *object = (RtApiJack *) info->object;
2128 // Check current stream state. If stopped, then we'll assume this
2129 // was called as a result of a call to RtApiJack::stopStream (the
2130 // deactivation of a client handle causes this function to be called).
2131 // If not, we'll assume the Jack server is shutting down or some
2132 // other problem occurred and we should close the stream.
2133 if ( object->isStreamRunning() == false ) return;
2135 ThreadHandle threadId;
2136 pthread_create( &threadId, NULL, jackCloseStream, info );
2137 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: record an over/underflow flag for each open direction
// so the next callbackEvent() can report it to the user callback.
// NOTE(review): this casts infoPointer directly to JackHandle*, but the
// registration site (jack_set_xrun_callback in probeDeviceOpen) appears to
// pass &handle (a JackHandle**). That mismatch looks like a bug — confirm
// against the upstream RtAudio fix.
2140 static int jackXrun( void *infoPointer )
2142 JackHandle *handle = (JackHandle *) infoPointer;
2144 if ( handle->ports[0] ) handle->xrun[0] = true;
2145 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a JACK stream:
//  - become a JACK client (or reuse the one from the earlier OUTPUT pass),
//  - resolve the device index to a client-name prefix,
//  - validate channel count and the fixed server sample rate,
//  - allocate the JackHandle, user/device buffers and port-id array,
//  - install process/xrun/shutdown callbacks and register our ports.
// Returns true on success; the error path (label not visible in this
// partial listing) tears down everything allocated so far.
2150 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2151 unsigned int firstChannel, unsigned int sampleRate,
2152 RtAudioFormat format, unsigned int *bufferSize,
2153 RtAudio::StreamOptions *options )
2155 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2157 // Look for jack server and try to become a client (only do once per stream).
2158 jack_client_t *client = 0;
2159 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2160 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2161 jack_status_t *status = NULL;
// Honor a user-supplied client name when provided via StreamOptions.
2162 if ( options && !options->streamName.empty() )
2163 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2165 client = jack_client_open( "RtApiJack", jackoptions, status );
2166 if ( client == 0 ) {
2167 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2168 error( RtAudioError::WARNING );
2173 // The handle must have been created on an earlier pass.
2174 client = handle->client;
2178 std::string port, previousPort, deviceName;
2179 unsigned int nPorts = 0, nDevices = 0;
2180 ports = jack_get_ports( client, NULL, NULL, 0 );
2182 // Parse the port names up to the first colon (:).
2185 port = (char *) ports[ nPorts ];
2186 iColon = port.find(":");
2187 if ( iColon != std::string::npos ) {
2188 port = port.substr( 0, iColon );
2189 if ( port != previousPort ) {
2190 if ( nDevices == device ) deviceName = port;
2192 previousPort = port;
2195 } while ( ports[++nPorts] );
2199 if ( device >= nDevices ) {
2200 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2204 // Count the available ports containing the client name as device
2205 // channels. Jack "input ports" equal RtAudio output channels.
2206 unsigned int nChannels = 0;
2207 unsigned long flag = JackPortIsInput;
2208 if ( mode == INPUT ) flag = JackPortIsOutput;
2209 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2211 while ( ports[ nChannels ] ) nChannels++;
2215 // Compare the jack ports for specified client to the requested number of channels.
2216 if ( nChannels < (channels + firstChannel) ) {
2217 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2218 errorText_ = errorStream_.str();
2222 // Check the jack server sample rate.
2223 unsigned int jackRate = jack_get_sample_rate( client );
// The JACK rate is fixed when the server starts; we cannot resample.
2224 if ( sampleRate != jackRate ) {
2225 jack_client_close( client );
2226 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2227 errorText_ = errorStream_.str();
2230 stream_.sampleRate = jackRate;
2232 // Get the latency of the JACK port.
2233 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2234 if ( ports[ firstChannel ] ) {
2236 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2237 // the range (usually the min and max are equal)
2238 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2239 // get the latency range
2240 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2241 // be optimistic, use the min!
2242 stream_.latency[mode] = latrange.min;
2243 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2247 // The jack server always uses 32-bit floating-point data.
2248 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2249 stream_.userFormat = format;
2251 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2252 else stream_.userInterleaved = true;
2254 // Jack always uses non-interleaved buffers.
2255 stream_.deviceInterleaved[mode] = false;
2257 // Jack always provides host byte-ordered data.
2258 stream_.doByteSwap[mode] = false;
2260 // Get the buffer size. The buffer size and number of buffers
2261 // (periods) is set when the jack server is started.
2262 stream_.bufferSize = (int) jack_get_buffer_size( client );
2263 *bufferSize = stream_.bufferSize;
2265 stream_.nDeviceChannels[mode] = channels;
2266 stream_.nUserChannels[mode] = channels;
2268 // Set flags for buffer conversion.
2269 stream_.doConvertBuffer[mode] = false;
2270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2271 stream_.doConvertBuffer[mode] = true;
2272 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2273 stream_.nUserChannels[mode] > 1 )
2274 stream_.doConvertBuffer[mode] = true;
2276 // Allocate our JackHandle structure for the stream.
2277 if ( handle == 0 ) {
2279 handle = new JackHandle;
2281 catch ( std::bad_alloc& ) {
2282 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2286 if ( pthread_cond_init(&handle->condition, NULL) ) {
2287 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2290 stream_.apiHandle = (void *) handle;
2291 handle->client = client;
2293 handle->deviceName[mode] = deviceName;
2295 // Allocate necessary internal buffers.
2296 unsigned long bufferBytes;
2297 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2298 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2299 if ( stream_.userBuffer[mode] == NULL ) {
2300 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2304 if ( stream_.doConvertBuffer[mode] ) {
2306 bool makeBuffer = true;
2307 if ( mode == OUTPUT )
2308 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2309 else { // mode == INPUT
2310 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
// For duplex, reuse the existing (output-pass) device buffer if it is
// already large enough for the input side.
2311 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2312 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2313 if ( bufferBytes < bytesOut ) makeBuffer = false;
2318 bufferBytes *= *bufferSize;
2319 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2320 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2321 if ( stream_.deviceBuffer == NULL ) {
2322 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2328 // Allocate memory for the Jack ports (channels) identifiers.
2329 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2330 if ( handle->ports[mode] == NULL ) {
2331 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2335 stream_.device[mode] = device;
2336 stream_.channelOffset[mode] = firstChannel;
2337 stream_.state = STREAM_STOPPED;
2338 stream_.callbackInfo.object = (void *) this;
2340 if ( stream_.mode == OUTPUT && mode == INPUT )
2341 // We had already set up the stream for output.
2342 stream_.mode = DUPLEX;
2344 stream_.mode = mode;
2345 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
// NOTE(review): passing &handle registers the address of a LOCAL pointer
// variable (a JackHandle**), which dangles once this function returns,
// while jackXrun() casts its argument as JackHandle*. This looks like the
// known upstream bug (should pass `handle`) — confirm before changing.
2346 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
2347 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2350 // Register our ports.
2352 if ( mode == OUTPUT ) {
2353 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2354 snprintf( label, 64, "outport %d", i );
2355 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2356 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2360 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2361 snprintf( label, 64, "inport %d", i );
2362 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2363 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2367 // Setup the buffer conversion information structure. We don't use
2368 // buffers to do channel offsets, so we override that parameter
2370 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2372 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error-unwind path (label not visible in this listing): release the
// condition variable, close the client, free ports, buffers and handle.
2378 pthread_cond_destroy( &handle->condition );
2379 jack_client_close( handle->client );
2381 if ( handle->ports[0] ) free( handle->ports[0] );
2382 if ( handle->ports[1] ) free( handle->ports[1] );
2385 stream_.apiHandle = 0;
2388 for ( int i=0; i<2; i++ ) {
2389 if ( stream_.userBuffer[i] ) {
2390 free( stream_.userBuffer[i] );
2391 stream_.userBuffer[i] = 0;
2395 if ( stream_.deviceBuffer ) {
2396 free( stream_.deviceBuffer );
2397 stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the JACK client, then free the
// JackHandle, port arrays, and all user/device buffers. Safe to call on a
// running stream; warns (and returns) when no stream is open.
2403 void RtApiJack :: closeStream( void )
2405 if ( stream_.state == STREAM_CLOSED ) {
2406 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2407 error( RtAudioError::WARNING );
2411 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2414 if ( stream_.state == STREAM_RUNNING )
2415 jack_deactivate( handle->client );
2417 jack_client_close( handle->client );
2421 if ( handle->ports[0] ) free( handle->ports[0] );
2422 if ( handle->ports[1] ) free( handle->ports[1] );
2423 pthread_cond_destroy( &handle->condition );
2425 stream_.apiHandle = 0;
2428 for ( int i=0; i<2; i++ ) {
2429 if ( stream_.userBuffer[i] ) {
2430 free( stream_.userBuffer[i] );
2431 stream_.userBuffer[i] = 0;
2435 if ( stream_.deviceBuffer ) {
2436 free( stream_.deviceBuffer );
2437 stream_.deviceBuffer = 0;
2440 stream_.mode = UNINITIALIZED;
2441 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client, then (unless autoconnect was
// disabled) wire our registered ports to the target device's physical ports,
// honoring the per-direction channel offsets. On any failure, reports a
// SYSTEM_ERROR after the cleanup path (not fully visible in this listing).
2444 void RtApiJack :: startStream( void )
2447 RtApi::startStream();
2448 if ( stream_.state == STREAM_RUNNING ) {
2449 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2450 error( RtAudioError::WARNING );
2454 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2455 int result = jack_activate( handle->client );
2457 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2463 // Get the list of available ports.
2464 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2466 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2467 if ( ports == NULL) {
2468 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2472 // Now make the port connections. Since RtAudio wasn't designed to
2473 // allow the user to select particular channels of a device, we'll
2474 // just open the first "nChannels" ports with offset.
2475 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2477 if ( ports[ stream_.channelOffset[0] + i ] )
2478 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2481 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2488 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2490 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2491 if ( ports == NULL) {
2492 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2496 // Now make the port connections. See note above.
2497 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2499 if ( ports[ stream_.channelOffset[1] + i ] )
// For capture, the connection direction is device port -> our port.
2500 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2503 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping so the first callback cycle starts cleanly.
2510 handle->drainCounter = 0;
2511 handle->internalDrain = false;
2512 stream_.state = STREAM_RUNNING;
2515 if ( result == 0 ) return;
2516 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully. For output/duplex streams, initiate a drain
// (drainCounter = 2) and block on the handle's condition variable until the
// process callback signals that the final buffers have been played, then
// deactivate the JACK client.
2519 void RtApiJack :: stopStream( void )
2522 if ( stream_.state == STREAM_STOPPED ) {
2523 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2524 error( RtAudioError::WARNING );
2528 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2529 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2531 if ( handle->drainCounter == 0 ) {
2532 handle->drainCounter = 2;
2533 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2537 jack_deactivate( handle->client );
2538 stream_.state = STREAM_STOPPED;
// Abort the stream without draining: set drainCounter past the "write
// zeros" threshold so the callback stops immediately. The remaining
// teardown (not visible in this listing) mirrors stopStream().
2541 void RtApiJack :: abortStream( void )
2544 if ( stream_.state == STREAM_STOPPED ) {
2545 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2546 error( RtAudioError::WARNING );
2550 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2551 handle->drainCounter = 2;
2556 // This function will be called by a spawned thread when the user
2557 // callback function signals that the stream should be stopped or
2558 // aborted. It is necessary to handle it this way because the
2559 // callbackEvent() function must return before the jack_deactivate()
2560 // function will return.
// Thread entry point: stops the stream on behalf of callbackEvent().
2561 static void *jackStopStream( void *ptr )
2563 CallbackInfo *info = (CallbackInfo *) ptr;
2564 RtApiJack *object = (RtApiJack *) info->object;
2566 object->stopStream();
2567 pthread_exit( NULL );
// Per-cycle JACK processing: invoke the user callback, then shuttle data
// between the JACK port buffers and the user/device buffers (with format
// conversion when needed). Also manages the drain state machine used by
// stopStream()/abortStream(). Returns SUCCESS (and FAILURE on the warning
// paths whose return lines are not visible in this partial listing).
2570 bool RtApiJack :: callbackEvent( unsigned long nframes )
2572 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2573 if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): message says "RtApiCore" — likely a copy/paste slip from
// the CoreAudio section; confirm before changing the string.
2574 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2575 error( RtAudioError::WARNING );
2578 if ( stream_.bufferSize != nframes ) {
2579 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2580 error( RtAudioError::WARNING );
2584 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2585 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2587 // Check if we were draining the stream and signal is finished.
2588 if ( handle->drainCounter > 3 ) {
2589 ThreadHandle threadId;
2591 stream_.state = STREAM_STOPPING;
2592 if ( handle->internalDrain == true )
// Callback-initiated stop: stopStream() must run on another thread.
2593 pthread_create( &threadId, NULL, jackStopStream, info );
// User-initiated stop: wake the stopStream() thread blocked on the condvar.
2595 pthread_cond_signal( &handle->condition );
2599 // Invoke user callback first, to get fresh output data.
2600 if ( handle->drainCounter == 0 ) {
2601 RtAudioCallback callback = (RtAudioCallback) info->callback;
2602 double streamTime = getStreamTime();
2603 RtAudioStreamStatus status = 0;
2604 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2605 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2606 handle->xrun[0] = false;
2608 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2609 status |= RTAUDIO_INPUT_OVERFLOW;
2610 handle->xrun[1] = false;
2612 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2613 stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort immediately; 1 = drain output then stop.
2614 if ( cbReturnValue == 2 ) {
2615 stream_.state = STREAM_STOPPING;
2616 handle->drainCounter = 2;
2618 pthread_create( &id, NULL, jackStopStream, info );
2621 else if ( cbReturnValue == 1 ) {
2622 handle->drainCounter = 1;
2623 handle->internalDrain = true;
2627 jack_default_audio_sample_t *jackbuffer;
2628 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2629 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2631 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2633 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2634 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2635 memset( jackbuffer, 0, bufferBytes );
2639 else if ( stream_.doConvertBuffer[0] ) {
2641 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2643 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2644 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2645 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2648 else { // no buffer conversion
2649 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2650 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2651 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2656 // Don't bother draining input
2657 if ( handle->drainCounter ) {
2658 handle->drainCounter++;
2662 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2664 if ( stream_.doConvertBuffer[1] ) {
2665 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2666 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2667 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2669 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2671 else { // no buffer conversion
2672 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2673 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2674 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
// Advance the RtAudio stream clock by one buffer period.
2680 RtApi::tickStreamTime();
2683 //******************** End of __UNIX_JACK__ *********************//
2686 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2688 // The ASIO API is designed around a callback scheme, so this
2689 // implementation is similar to that used for OS-X CoreAudio and Linux
2690 // Jack. The primary constraint with ASIO is that it only allows
2691 // access to a single driver at a time. Thus, it is not possible to
2692 // have more than one simultaneous RtAudio stream.
2694 // This implementation also requires a number of external ASIO files
2695 // and a few global variables. The ASIO callback scheme does not
2696 // allow for the passing of user data, so we must create a global
2697 // pointer to our callbackInfo structure.
2699 // On unix systems, we make use of a pthread condition variable.
2700 // Since there is no equivalent in Windows, I hacked something based
2701 // on information found in
2702 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2704 #include "asiosys.h"
2706 #include "iasiothiscallresolver.h"
2707 #include "asiodrivers.h"
2710 static AsioDrivers drivers;
2711 static ASIOCallbacks asioCallbacks;
2712 static ASIODriverInfo driverInfo;
2713 static CallbackInfo *asioCallbackInfo;
2714 static bool asioXRun;
// Per-stream state for the ASIO backend. NOTE(review): partial listing —
// the "struct AsioHandle {" line and some members are not visible here.
2717 int drainCounter; // Tracks callback counts when draining
2718 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Buffer descriptors handed to ASIOCreateBuffers(); freed on close.
2719 ASIOBufferInfo *bufferInfos;
// Default constructor: zero-initialize all members.
2723 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2726 // Function declarations (definitions at end of section)
2727 static const char* getAsioErrorString( ASIOError result );
2728 static void sampleRateChanged( ASIOSampleRate sRate );
2729 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (ASIO drivers require a single-threaded
// apartment), reset the driver list, and prepare the ASIODriverInfo used
// by later ASIOInit() calls.
2731 RtApiAsio :: RtApiAsio()
2733 // ASIO cannot run on a multi-threaded apartment. You can call
2734 // CoInitialize beforehand, but it must be for apartment threading
2735 // (in which case, CoInitialize will return S_FALSE here).
2736 coInitialized_ = false;
2737 HRESULT hr = CoInitialize( NULL );
2739 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2740 error( RtAudioError::WARNING );
2742 coInitialized_ = true;
2744 drivers.removeCurrentDriver();
2745 driverInfo.asioVersion = 2;
2747 // See note in DirectSound implementation about GetDesktopWindow().
2748 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then balance the CoInitialize() made
// in the constructor (only if it succeeded).
2751 RtApiAsio :: ~RtApiAsio()
2753 if ( stream_.state != STREAM_CLOSED ) closeStream();
2754 if ( coInitialized_ ) CoUninitialize();
// Report the number of installed ASIO drivers (each driver is one device).
2757 unsigned int RtApiAsio :: getDeviceCount( void )
2759 return (unsigned int) drivers.asioGetNumDev();
// Probe one ASIO driver and fill an RtAudio::DeviceInfo: name, channel
// counts, supported sample rates, and native format. Because ASIO allows
// only one loaded driver at a time, probing while a stream is open returns
// the results cached by saveDeviceInfo().
2762 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2764 RtAudio::DeviceInfo info;
2765 info.probed = false;
2768 unsigned int nDevices = getDeviceCount();
2769 if ( nDevices == 0 ) {
2770 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2771 error( RtAudioError::INVALID_USE );
2775 if ( device >= nDevices ) {
2776 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2777 error( RtAudioError::INVALID_USE );
2781 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2782 if ( stream_.state != STREAM_CLOSED ) {
2783 if ( device >= devices_.size() ) {
2784 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2785 error( RtAudioError::WARNING );
2788 return devices_[ device ];
2791 char driverName[32];
2792 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2793 if ( result != ASE_OK ) {
2794 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2795 errorText_ = errorStream_.str();
2796 error( RtAudioError::WARNING );
2800 info.name = driverName;
2802 if ( !drivers.loadDriver( driverName ) ) {
2803 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2804 errorText_ = errorStream_.str();
2805 error( RtAudioError::WARNING );
2809 result = ASIOInit( &driverInfo );
2810 if ( result != ASE_OK ) {
2811 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2812 errorText_ = errorStream_.str();
2813 error( RtAudioError::WARNING );
2817 // Determine the device channel information.
2818 long inputChannels, outputChannels;
2819 result = ASIOGetChannels( &inputChannels, &outputChannels );
2820 if ( result != ASE_OK ) {
2821 drivers.removeCurrentDriver();
2822 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2823 errorText_ = errorStream_.str();
2824 error( RtAudioError::WARNING );
2828 info.outputChannels = outputChannels;
2829 info.inputChannels = inputChannels;
2830 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2831 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2833 // Determine the supported sample rates.
2834 info.sampleRates.clear();
2835 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2836 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2837 if ( result == ASE_OK ) {
2838 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2840 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2841 info.preferredSampleRate = SAMPLE_RATES[i];
2845 // Determine supported data types ... just check first channel and assume rest are the same.
2846 ASIOChannelInfo channelInfo;
2847 channelInfo.channel = 0;
2848 channelInfo.isInput = true;
2849 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2850 result = ASIOGetChannelInfo( &channelInfo );
2851 if ( result != ASE_OK ) {
2852 drivers.removeCurrentDriver();
2853 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2854 errorText_ = errorStream_.str();
2855 error( RtAudioError::WARNING );
2859 info.nativeFormats = 0;
2860 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2861 info.nativeFormats |= RTAUDIO_SINT16;
2862 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2863 info.nativeFormats |= RTAUDIO_SINT32;
2864 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2865 info.nativeFormats |= RTAUDIO_FLOAT32;
2866 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2867 info.nativeFormats |= RTAUDIO_FLOAT64;
2868 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2869 info.nativeFormats |= RTAUDIO_SINT24;
2871 if ( info.outputChannels > 0 )
2872 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2873 if ( info.inputChannels > 0 )
2874 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver before returning so other probes/opens can proceed.
2877 drivers.removeCurrentDriver();
// ASIO driver callback: invoked when a buffer half (index 0 or 1) is ready.
// Recovers the RtApiAsio instance stashed in the file-global asioCallbackInfo
// (set in probeDeviceOpen) and forwards to its callbackEvent() handler.
// The second parameter (processNow) is deliberately ignored.
2881 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2883 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2884 object->callbackEvent( index );
// Probe every available ASIO device and cache the results in devices_.
// Called before opening a stream because getDeviceInfo() cannot probe while
// a stream is open (ASIO allows only one active driver at a time).
2887 void RtApiAsio :: saveDeviceInfo( void )
2891 unsigned int nDevices = getDeviceCount();
2892 devices_.resize( nDevices );
2893 for ( unsigned int i=0; i<nDevices; i++ )
2894 devices_[i] = getDeviceInfo( i );
// Open (or extend to duplex) an ASIO stream on the given device.
//
// Parameters: device index, stream mode (OUTPUT/INPUT), channel count and
// first-channel offset, requested sample rate and format, in/out bufferSize
// (in frames; may be adjusted to driver constraints), optional StreamOptions.
// Returns SUCCESS/FAILURE; on failure errorText_ describes the problem and,
// for a non-duplex open, all partially-acquired resources are released in the
// error path at the bottom (duplex-input failures are cleaned up by
// RtApi::openStream()).
//
// FIX: line 2976 below contained the mis-encoded token "¤tRate" (an HTML
// entity corruption of "&currentRate"); restored to take the address of
// currentRate as ASIOGetSampleRate() requires.
2897 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2898 unsigned int firstChannel, unsigned int sampleRate,
2899 RtAudioFormat format, unsigned int *bufferSize,
2900 RtAudio::StreamOptions *options )
2901 {
2903 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2905 // For ASIO, a duplex stream MUST use the same driver.
2906 if ( isDuplexInput && stream_.device[0] != device ) {
2907 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2911 char driverName[32];
2912 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2913 if ( result != ASE_OK ) {
2914 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2915 errorText_ = errorStream_.str();
2919 // Only load the driver once for duplex stream.
2920 if ( !isDuplexInput ) {
2921 // The getDeviceInfo() function will not work when a stream is open
2922 // because ASIO does not allow multiple devices to run at the same
2923 // time. Thus, we'll probe the system before opening a stream and
2924 // save the results for use by getDeviceInfo().
2925 this->saveDeviceInfo();
2927 if ( !drivers.loadDriver( driverName ) ) {
2928 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2929 errorText_ = errorStream_.str();
2933 result = ASIOInit( &driverInfo );
2934 if ( result != ASE_OK ) {
2935 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2936 errorText_ = errorStream_.str();
2941 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2942 bool buffersAllocated = false;
2943 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2944 unsigned int nChannels;
2947 // Check the device channel count.
2948 long inputChannels, outputChannels;
2949 result = ASIOGetChannels( &inputChannels, &outputChannels );
2950 if ( result != ASE_OK ) {
2951 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2952 errorText_ = errorStream_.str();
2956 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2957 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2958 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2959 errorText_ = errorStream_.str();
2962 stream_.nDeviceChannels[mode] = channels;
2963 stream_.nUserChannels[mode] = channels;
2964 stream_.channelOffset[mode] = firstChannel;
2966 // Verify the sample rate is supported.
2967 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2968 if ( result != ASE_OK ) {
2969 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2970 errorText_ = errorStream_.str();
2974 // Get the current sample rate
2975 ASIOSampleRate currentRate;
2976 result = ASIOGetSampleRate( &currentRate );
2977 if ( result != ASE_OK ) {
2978 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2979 errorText_ = errorStream_.str();
2983 // Set the sample rate only if necessary
2984 if ( currentRate != sampleRate ) {
2985 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2986 if ( result != ASE_OK ) {
2987 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2988 errorText_ = errorStream_.str();
2993 // Determine the driver data type.
2994 ASIOChannelInfo channelInfo;
2995 channelInfo.channel = 0;
2996 if ( mode == OUTPUT ) channelInfo.isInput = false;
2997 else channelInfo.isInput = true;
2998 result = ASIOGetChannelInfo( &channelInfo );
2999 if ( result != ASE_OK ) {
3000 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3001 errorText_ = errorStream_.str();
3005 // Assuming WINDOWS host is always little-endian.
3006 stream_.doByteSwap[mode] = false;
3007 stream_.userFormat = format;
3008 stream_.deviceFormat[mode] = 0;
3009 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3011 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3013 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3014 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3015 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3017 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3018 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3019 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3021 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3022 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3023 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3025 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3026 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3027 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3030 if ( stream_.deviceFormat[mode] == 0 ) {
3031 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3032 errorText_ = errorStream_.str();
3036 // Set the buffer size. For a duplex stream, this will end up
3037 // setting the buffer size based on the input constraints, which
3039 long minSize, maxSize, preferSize, granularity;
3040 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3041 if ( result != ASE_OK ) {
3042 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3043 errorText_ = errorStream_.str();
3047 if ( isDuplexInput ) {
3048 // When this is the duplex input (output was opened before), then we have to use the same
3049 // buffersize as the output, because it might use the preferred buffer size, which most
3050 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3051 // So instead of throwing an error, make them equal. The caller uses the reference
3052 // to the "bufferSize" param as usual to set up processing buffers.
3054 *bufferSize = stream_.bufferSize;
3057 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3058 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3059 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3060 else if ( granularity == -1 ) {
3061 // Make sure bufferSize is a power of two.
3062 int log2_of_min_size = 0;
3063 int log2_of_max_size = 0;
3065 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3066 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3067 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3070 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3071 int min_delta_num = log2_of_min_size;
3073 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3074 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3075 if (current_delta < min_delta) {
3076 min_delta = current_delta;
3081 *bufferSize = ( (unsigned int)1 << min_delta_num );
3082 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3083 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3085 else if ( granularity != 0 ) {
3086 // Set to an even multiple of granularity, rounding up.
3087 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3092 // we don't use it anymore, see above!
3093 // Just left it here for the case...
3094 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3095 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3100 stream_.bufferSize = *bufferSize;
3101 stream_.nBuffers = 2;
3103 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3104 else stream_.userInterleaved = true;
3106 // ASIO always uses non-interleaved buffers.
3107 stream_.deviceInterleaved[mode] = false;
3109 // Allocate, if necessary, our AsioHandle structure for the stream.
3110 if ( handle == 0 ) {
3112 handle = new AsioHandle;
3114 catch ( std::bad_alloc& ) {
3115 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3118 handle->bufferInfos = 0;
3120 // Create a manual-reset event.
3121 handle->condition = CreateEvent( NULL, // no security
3122 TRUE, // manual-reset
3123 FALSE, // non-signaled initially
3125 stream_.apiHandle = (void *) handle;
3128 // Create the ASIO internal buffers. Since RtAudio sets up input
3129 // and output separately, we'll have to dispose of previously
3130 // created output buffers for a duplex stream.
3131 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3132 ASIODisposeBuffers();
3133 if ( handle->bufferInfos ) free( handle->bufferInfos );
3136 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3138 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3139 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3140 if ( handle->bufferInfos == NULL ) {
3141 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3142 errorText_ = errorStream_.str();
3146 ASIOBufferInfo *infos;
3147 infos = handle->bufferInfos;
3148 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3149 infos->isInput = ASIOFalse;
3150 infos->channelNum = i + stream_.channelOffset[0];
3151 infos->buffers[0] = infos->buffers[1] = 0;
3153 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3154 infos->isInput = ASIOTrue;
3155 infos->channelNum = i + stream_.channelOffset[1];
3156 infos->buffers[0] = infos->buffers[1] = 0;
3159 // prepare for callbacks
3160 stream_.sampleRate = sampleRate;
3161 stream_.device[mode] = device;
3162 stream_.mode = isDuplexInput ? DUPLEX : mode;
3164 // store this class instance before registering callbacks, that are going to use it
3165 asioCallbackInfo = &stream_.callbackInfo;
3166 stream_.callbackInfo.object = (void *) this;
3168 // Set up the ASIO callback structure and create the ASIO data buffers.
3169 asioCallbacks.bufferSwitch = &bufferSwitch;
3170 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3171 asioCallbacks.asioMessage = &asioMessages;
3172 asioCallbacks.bufferSwitchTimeInfo = NULL;
3173 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3174 if ( result != ASE_OK ) {
3175 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3176 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3177 // in that case, let's be naïve and try that instead
3178 *bufferSize = preferSize;
3179 stream_.bufferSize = *bufferSize;
3180 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3183 if ( result != ASE_OK ) {
3184 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3185 errorText_ = errorStream_.str();
3188 buffersAllocated = true;
3189 stream_.state = STREAM_STOPPED;
3191 // Set flags for buffer conversion.
3192 stream_.doConvertBuffer[mode] = false;
3193 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3194 stream_.doConvertBuffer[mode] = true;
3195 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3196 stream_.nUserChannels[mode] > 1 )
3197 stream_.doConvertBuffer[mode] = true;
3199 // Allocate necessary internal buffers
3200 unsigned long bufferBytes;
3201 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3202 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3203 if ( stream_.userBuffer[mode] == NULL ) {
3204 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3208 if ( stream_.doConvertBuffer[mode] ) {
3210 bool makeBuffer = true;
3211 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3212 if ( isDuplexInput && stream_.deviceBuffer ) {
3213 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3214 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3218 bufferBytes *= *bufferSize;
3219 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3220 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3221 if ( stream_.deviceBuffer == NULL ) {
3222 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3228 // Determine device latencies
3229 long inputLatency, outputLatency;
3230 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3231 if ( result != ASE_OK ) {
3232 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3233 errorText_ = errorStream_.str();
3234 error( RtAudioError::WARNING); // warn but don't fail
3237 stream_.latency[0] = outputLatency;
3238 stream_.latency[1] = inputLatency;
3241 // Setup the buffer conversion information structure. We don't use
3242 // buffers to do channel offsets, so we override that parameter
3244 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3249 if ( !isDuplexInput ) {
3250 // the cleanup for error in the duplex input, is done by RtApi::openStream
3251 // So we clean up for single channel only
3253 if ( buffersAllocated )
3254 ASIODisposeBuffers();
3256 drivers.removeCurrentDriver();
3259 CloseHandle( handle->condition );
3260 if ( handle->bufferInfos )
3261 free( handle->bufferInfos );
3264 stream_.apiHandle = 0;
3268 if ( stream_.userBuffer[mode] ) {
3269 free( stream_.userBuffer[mode] );
3270 stream_.userBuffer[mode] = 0;
3273 if ( stream_.deviceBuffer ) {
3274 free( stream_.deviceBuffer );
3275 stream_.deviceBuffer = 0;
3280 }
// Close the open stream: stop it if running, dispose the ASIO buffers,
// unload the driver, and free the AsioHandle plus all user/device buffers.
// Issues a WARNING (does not throw) if no stream is open.
3282 void RtApiAsio :: closeStream()
3284 if ( stream_.state == STREAM_CLOSED ) {
3285 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3286 error( RtAudioError::WARNING );
3290 if ( stream_.state == STREAM_RUNNING ) {
3291 stream_.state = STREAM_STOPPED;
3294 ASIODisposeBuffers();
3295 drivers.removeCurrentDriver();
3297 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3299 CloseHandle( handle->condition );
3300 if ( handle->bufferInfos )
3301 free( handle->bufferInfos );
// Mark the handle gone so a subsequent open re-allocates it.
3303 stream_.apiHandle = 0;
// Release both user buffers (index 0 = output, 1 = input).
3306 for ( int i=0; i<2; i++ ) {
3307 if ( stream_.userBuffer[i] ) {
3308 free( stream_.userBuffer[i] );
3309 stream_.userBuffer[i] = 0;
3313 if ( stream_.deviceBuffer ) {
3314 free( stream_.deviceBuffer );
3315 stream_.deviceBuffer = 0;
3318 stream_.mode = UNINITIALIZED;
3319 stream_.state = STREAM_CLOSED;
// File-scope flag cleared by startStream() (see below); presumably guards
// against calling asioStopStream more than once per run — TODO confirm
// against the dropped portion of this file.
3322 bool stopThreadCalled = false;
// Start the open stream via ASIOStart(); warns if already running.
// Resets the handle's drain bookkeeping and the stop-signal event before
// marking the stream RUNNING.  Throws SYSTEM_ERROR on driver failure.
3324 void RtApiAsio :: startStream()
3327 RtApi::startStream();
3328 if ( stream_.state == STREAM_RUNNING ) {
3329 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3330 error( RtAudioError::WARNING );
3334 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3335 ASIOError result = ASIOStart();
3336 if ( result != ASE_OK ) {
3337 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3338 errorText_ = errorStream_.str();
3342 handle->drainCounter = 0;
3343 handle->internalDrain = false;
// Manual-reset event used by stopStream() to wait for output drain.
3344 ResetEvent( handle->condition );
3345 stream_.state = STREAM_RUNNING;
3349 stopThreadCalled = false;
3351 if ( result == ASE_OK ) return;
3352 error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream.  For output/duplex streams, first lets the
// output drain: sets drainCounter and blocks on the handle's event until
// callbackEvent() signals completion, then calls ASIOStop().
// Warns if already stopped; throws SYSTEM_ERROR on driver failure.
3355 void RtApiAsio :: stopStream()
3358 if ( stream_.state == STREAM_STOPPED ) {
3359 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3360 error( RtAudioError::WARNING );
3364 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3365 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3366 if ( handle->drainCounter == 0 ) {
3367 handle->drainCounter = 2;
3368 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3372 stream_.state = STREAM_STOPPED;
3374 ASIOError result = ASIOStop();
3375 if ( result != ASE_OK ) {
3376 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3377 errorText_ = errorStream_.str();
3380 if ( result == ASE_OK ) return;
3381 error( RtAudioError::SYSTEM_ERROR );
// Abort the running stream.  Because disposed ASIO device buffers can keep
// sounding unless zeroed (see note below), the fast-abort path is disabled
// and abort behaves exactly like stopStream().  Warns if already stopped.
3384 void RtApiAsio :: abortStream()
3387 if ( stream_.state == STREAM_STOPPED ) {
3388 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3389 error( RtAudioError::WARNING );
3393 // The following lines were commented-out because some behavior was
3394 // noted where the device buffers need to be zeroed to avoid
3395 // continuing sound, even when the device buffers are completely
3396 // disposed. So now, calling abort is the same as calling stop.
3397 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3398 // handle->drainCounter = 2;
3402 // This function will be called by a spawned thread when the user
3403 // callback function signals that the stream should be stopped or
3404 // aborted. It is necessary to handle it this way because the
3405 // callbackEvent() function must return before the ASIOStop()
3406 // function will return.
// Thread entry point (spawned with _beginthreadex in callbackEvent);
// receives the stream's CallbackInfo and calls stopStream() on its owner.
3407 static unsigned __stdcall asioStopStream( void *ptr )
3409 CallbackInfo *info = (CallbackInfo *) ptr;
3410 RtApiAsio *object = (RtApiAsio *) info->object;
3412 object->stopStream();
// Per-buffer processing, driven by the driver's bufferSwitch() callback.
// bufferIndex selects which half of the double buffer to fill/read.
// Handles: drain signaling/stop-thread spawning, invoking the user callback,
// format conversion and byte swapping in both directions, and copying
// between RtAudio's interleaved user buffers and ASIO's per-channel buffers.
3417 bool RtApiAsio :: callbackEvent( long bufferIndex )
3419 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3420 if ( stream_.state == STREAM_CLOSED ) {
3421 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3422 error( RtAudioError::WARNING );
3426 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3427 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3429 // Check if we were draining the stream and signal if finished.
3430 if ( handle->drainCounter > 3 ) {
3432 stream_.state = STREAM_STOPPING;
3433 if ( handle->internalDrain == false )
// External stop (stopStream is blocked on this event) — wake it.
3434 SetEvent( handle->condition );
3435 else { // spawn a thread to stop the stream
3437 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3438 &stream_.callbackInfo, 0, &threadId );
3443 // Invoke user callback to get fresh output data UNLESS we are
3445 if ( handle->drainCounter == 0 ) {
3446 RtAudioCallback callback = (RtAudioCallback) info->callback;
3447 double streamTime = getStreamTime();
3448 RtAudioStreamStatus status = 0;
3449 if ( stream_.mode != INPUT && asioXRun == true ) {
3450 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3453 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3454 status |= RTAUDIO_INPUT_OVERFLOW;
3457 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3458 stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort immediately; 1 = drain output then stop.
3459 if ( cbReturnValue == 2 ) {
3460 stream_.state = STREAM_STOPPING;
3461 handle->drainCounter = 2;
3463 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3464 &stream_.callbackInfo, 0, &threadId );
3467 else if ( cbReturnValue == 1 ) {
3468 handle->drainCounter = 1;
3469 handle->internalDrain = true;
3473 unsigned int nChannels, bufferBytes, i, j;
3474 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3475 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3477 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3479 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3481 for ( i=0, j=0; i<nChannels; i++ ) {
3482 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3483 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3487 else if ( stream_.doConvertBuffer[0] ) {
// User format differs from device format: convert (and swap) into the
// intermediate device buffer, then scatter per channel.
3489 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3490 if ( stream_.doByteSwap[0] )
3491 byteSwapBuffer( stream_.deviceBuffer,
3492 stream_.bufferSize * stream_.nDeviceChannels[0],
3493 stream_.deviceFormat[0] );
3495 for ( i=0, j=0; i<nChannels; i++ ) {
3496 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3497 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3498 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3504 if ( stream_.doByteSwap[0] )
3505 byteSwapBuffer( stream_.userBuffer[0],
3506 stream_.bufferSize * stream_.nUserChannels[0],
3507 stream_.userFormat );
3509 for ( i=0, j=0; i<nChannels; i++ ) {
3510 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3511 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3512 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3518 // Don't bother draining input
3519 if ( handle->drainCounter ) {
3520 handle->drainCounter++;
3524 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3526 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3528 if (stream_.doConvertBuffer[1]) {
3530 // Always interleave ASIO input data.
3531 for ( i=0, j=0; i<nChannels; i++ ) {
3532 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3533 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3534 handle->bufferInfos[i].buffers[bufferIndex],
3538 if ( stream_.doByteSwap[1] )
3539 byteSwapBuffer( stream_.deviceBuffer,
3540 stream_.bufferSize * stream_.nDeviceChannels[1],
3541 stream_.deviceFormat[1] );
3542 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3546 for ( i=0, j=0; i<nChannels; i++ ) {
3547 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3548 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3549 handle->bufferInfos[i].buffers[bufferIndex],
3554 if ( stream_.doByteSwap[1] )
3555 byteSwapBuffer( stream_.userBuffer[1],
3556 stream_.bufferSize * stream_.nUserChannels[1],
3557 stream_.userFormat );
3562 // The following call was suggested by Malte Clasen. While the API
3563 // documentation indicates it should not be required, some device
3564 // drivers apparently do not function correctly without it.
3567 RtApi::tickStreamTime();
// ASIO sampleRateDidChange callback: the driver reports a (possibly
// external-sync) rate change.  RtAudio cannot follow a live rate change,
// so the stream is stopped; RtAudioError from stopStream() is caught and
// reported to stderr rather than propagated into the driver's context.
3571 static void sampleRateChanged( ASIOSampleRate sRate )
3573 // The ASIO documentation says that this usually only happens during
3574 // external sync. Audio processing is not stopped by the driver,
3575 // actual sample rate might not have even changed, maybe only the
3576 // sample rate status of an AES/EBU or S/PDIF digital input at the
3579 RtApi *object = (RtApi *) asioCallbackInfo->object;
3581 object->stopStream();
3583 catch ( RtAudioError &exception ) {
3584 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3588 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO asioMessage callback: answers the driver's capability queries and
// notifications.  The selector identifies the request; the return value's
// meaning is selector-specific (generally non-zero = supported/handled).
3591 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3595 switch( selector ) {
3596 case kAsioSelectorSupported:
3597 if ( value == kAsioResetRequest
3598 || value == kAsioEngineVersion
3599 || value == kAsioResyncRequest
3600 || value == kAsioLatenciesChanged
3601 // The following three were added for ASIO 2.0, you don't
3602 // necessarily have to support them.
3603 || value == kAsioSupportsTimeInfo
3604 || value == kAsioSupportsTimeCode
3605 || value == kAsioSupportsInputMonitor)
3608 case kAsioResetRequest:
3609 // Defer the task and perform the reset of the driver during the
3610 // next "safe" situation. You cannot reset the driver right now,
3611 // as this code is called from the driver. Resetting the driver is
3612 // done by completely destructing it. I.e. ASIOStop(),
3613 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3615 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3618 case kAsioResyncRequest:
3619 // This informs the application that the driver encountered some
3620 // non-fatal data loss. It is used for synchronization purposes
3621 // of different media. Added mainly to work around the Win16Mutex
3622 // problems in Windows 95/98 with the Windows Multimedia system,
3623 // which could lose data because the Mutex was held too long by
3624 // another thread. However a driver can issue it in other
3626 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3630 case kAsioLatenciesChanged:
3631 // This will inform the host application that the driver's
3632 // latencies changed. Beware, this does not mean that the
3633 // buffer sizes have changed! You might need to update internal
3635 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3638 case kAsioEngineVersion:
3639 // Return the supported ASIO version of the host application. If
3640 // a host application does not implement this selector, ASIO 1.0
3641 // is assumed by the driver.
3644 case kAsioSupportsTimeInfo:
3645 // Informs the driver whether the
3646 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3647 // For compatibility with ASIO 1.0 drivers the host application
3648 // should always support the "old" bufferSwitch method, too.
3651 case kAsioSupportsTimeCode:
3652 // Informs the driver whether application is interested in time
3653 // code info. If an application does not need to know about time
3654 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message via a static lookup
// table; returns "Unknown error." for codes not in the table.
3661 static const char* getAsioErrorString( ASIOError result )
3669 static const Messages m[] =
3671 { ASE_NotPresent, "Hardware input or output is not present or available." },
3672 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3673 { ASE_InvalidParameter, "Invalid input parameter." },
3674 { ASE_InvalidMode, "Invalid mode." },
3675 { ASE_SPNotAdvancing, "Sample position not advancing." },
3676 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3677 { ASE_NoMemory, "Not enough memory to complete the request." }
3680 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3681 if ( m[i].value == result ) return m[i].message;
3683 return "Unknown error.";
3686 //******************** End of __WINDOWS_ASIO__ *********************//
3690 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3692 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3693 // - Introduces support for the Windows WASAPI API
3694 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3695 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3696 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3701 #include <audioclient.h>
3703 #include <mmdeviceapi.h>
3704 #include <functiondiscoverykeys_devpkey.h>
3706 //=============================================================================
3708 #define SAFE_RELEASE( objectPtr )\
3711 objectPtr->Release();\
3715 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3717 //-----------------------------------------------------------------------------
3719 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3720 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3721 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3722 // provide intermediate storage for read / write synchronization.
3736 // sets the length of the internal ring buffer
// bufferSize is in elements, formatBytes is the per-element size;
// the buffer is zero-initialized via calloc.  NOTE(review): the previous
// buffer_ is presumably freed in lines not visible here — confirm.
3737 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3740 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3742 bufferSize_ = bufferSize;
3747 // attempt to push a buffer into the ring buffer at the current "in" index
// bufferSize is in sample frames of the given format; the format selects the
// element width used for the (possibly wrapped) two-part copy.  Returns false
// without copying when the input is invalid or there is not enough free space
// between the "in" and "out" indices.
3748 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3750 if ( !buffer || // incoming buffer is NULL
3751 bufferSize == 0 || // incoming buffer has no data
3752 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so overlap can be tested with plain comparisons.
3757 unsigned int relOutIndex = outIndex_;
3758 unsigned int inIndexEnd = inIndex_ + bufferSize;
3759 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3760 relOutIndex += bufferSize_;
3763 // "in" index can end on the "out" index but cannot begin at it
3764 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3765 return false; // not enough space between "in" index and "out" index
3768 // copy buffer from external to internal
// fromZeroSize = element count that wraps past the end of the ring;
// fromInSize = element count copied before the wrap point.
3769 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3770 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3771 int fromInSize = bufferSize - fromZeroSize;
3776 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3777 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3779 case RTAUDIO_SINT16:
3780 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3781 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3783 case RTAUDIO_SINT24:
3784 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3785 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3787 case RTAUDIO_SINT32:
3788 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3789 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3791 case RTAUDIO_FLOAT32:
3792 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3793 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3795 case RTAUDIO_FLOAT64:
3796 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3797 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3801 // update "in" index
3802 inIndex_ += bufferSize;
3803 inIndex_ %= bufferSize_;
3808 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror of pushBuffer: copy `bufferSize` samples of `format` data out of the
// ring into `buffer`, wrapping at the end of the ring if necessary, then
// advance outIndex_.  Returns false without copying when the request is
// invalid or insufficient data is available.  NOTE(review): switch header,
// returns and braces are not visible in this view.
3809 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3811 if ( !buffer || // incoming buffer is NULL
3812 bufferSize == 0 || // incoming buffer has no data
3813 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the writer index so the availability test can use linear
// comparisons even when the read region wraps past the end of the ring.
3818 unsigned int relInIndex = inIndex_;
3819 unsigned int outIndexEnd = outIndex_ + bufferSize;
3820 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3821 relInIndex += bufferSize_;
3824 // "out" index can begin at and end on the "in" index
3825 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3826 return false; // not enough space between "out" index and "in" index
3829 // copy buffer from internal to external
// fromZeroSize: samples that wrap around to the ring start;
// fromOutSize: samples readable between outIndex_ and the end of the ring.
3830 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3831 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3832 int fromOutSize = bufferSize - fromZeroSize;
3837 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3838 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3840 case RTAUDIO_SINT16:
3841 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3842 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3844 case RTAUDIO_SINT24:
3845 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3846 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3848 case RTAUDIO_SINT32:
3849 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3850 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3852 case RTAUDIO_FLOAT32:
3853 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3854 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3856 case RTAUDIO_FLOAT64:
3857 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3858 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3862 // update "out" index
// Advance the reader index modulo the ring capacity.
3863 outIndex_ += bufferSize;
3864 outIndex_ %= bufferSize_;
3871 unsigned int bufferSize_; // ring capacity, in samples (not bytes)
3872 unsigned int inIndex_; // writer position (next sample slot to fill)
3873 unsigned int outIndex_; // reader position (next sample slot to drain)
3876 //-----------------------------------------------------------------------------
3878 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3879 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
3880 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3881 // This sample rate converter works best with conversions between one rate and its multiple.
// Convert inSampleCount frames of interleaved audio from inSampleRate to
// outSampleRate, writing the result to outBuffer and reporting the produced
// frame count via outSampleCount.  When one rate is an integer multiple of
// the other, frames are copied (nearest-input); otherwise linear
// interpolation between adjacent input frames is used.
3882 void convertBufferWasapi( char* outBuffer,
3883 const char* inBuffer,
3884 const unsigned int& channelCount,
3885 const unsigned int& inSampleRate,
3886 const unsigned int& outSampleRate,
3887 const unsigned int& inSampleCount,
3888 unsigned int& outSampleCount,
3889 const RtAudioFormat& format )
3891 // calculate the new outSampleCount and relative sampleStep
3892 float sampleRatio = ( float ) outSampleRate / inSampleRate;
3893 float sampleRatioInv = ( float ) 1 / sampleRatio;
3894 float sampleStep = 1.0f / sampleRatio;
3895 float inSampleFraction = 0.0f;
3897 outSampleCount = ( unsigned int ) std::roundf( inSampleCount * sampleRatio );
3899 // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
3900 if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
3902 // frame-by-frame, copy each relative input sample into its corresponding output sample
3903 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
// Truncate the fractional input position to pick the source frame.
3905 unsigned int inSample = ( unsigned int ) inSampleFraction;
3910 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
3912 case RTAUDIO_SINT16:
3913 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
3915 case RTAUDIO_SINT24:
3916 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
3918 case RTAUDIO_SINT32:
3919 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
3921 case RTAUDIO_FLOAT32:
3922 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
3924 case RTAUDIO_FLOAT64:
3925 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
3929 // jump to next in sample
3930 inSampleFraction += sampleStep;
3933 else // else interpolate
3935 // frame-by-frame, copy each relative input sample into its corresponding output sample
3936 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
// Split the fractional position into an integer frame index and the
// interpolation weight between that frame and the next.
3938 unsigned int inSample = ( unsigned int ) inSampleFraction;
3939 float inSampleDec = inSampleFraction - inSample;
3940 unsigned int frameInSample = inSample * channelCount;
3941 unsigned int frameOutSample = outSample * channelCount;
// NOTE(review): each branch below reads the frame at
// frameInSample + channelCount, i.e. one frame past inSample.  When the
// final output sample maps to the last input frame this reads one frame
// beyond inSampleCount — confirm callers keep a guard frame after the
// input buffer.
3947 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3949 char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
3950 char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
3951 char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
3952 ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3956 case RTAUDIO_SINT16:
3958 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3960 short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
3961 short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
3962 short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
3963 ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3967 case RTAUDIO_SINT24:
3969 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3971 int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
3972 int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
3973 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
3974 ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3978 case RTAUDIO_SINT32:
3980 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3982 int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
3983 int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
3984 int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
3985 ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
3989 case RTAUDIO_FLOAT32:
3991 for ( unsigned int channel = 0; channel < channelCount; channel++ )
3993 float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
3994 float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
3995 float sampleDiff = ( toSample - fromSample ) * inSampleDec;
3996 ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
4000 case RTAUDIO_FLOAT64:
4002 for ( unsigned int channel = 0; channel < channelCount; channel++ )
4004 double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
4005 double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
4006 double sampleDiff = ( toSample - fromSample ) * inSampleDec;
4007 ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
4013 // jump to next in sample
4014 inSampleFraction += sampleStep;
4019 //-----------------------------------------------------------------------------
4021 // A structure to hold various information related to the WASAPI implementation.
// Raw COM interface pointers and Win32 event handles for at most one capture
// endpoint and one render endpoint.  Release/close is performed by
// RtApiWasapi (closeStream), not by this struct.
4024 IAudioClient* captureAudioClient;
4025 IAudioClient* renderAudioClient;
4026 IAudioCaptureClient* captureClient;
4027 IAudioRenderClient* renderClient;
4028 HANDLE captureEvent;
// Default constructor: null every pointer/handle so "not yet created" is
// unambiguous.
4032 : captureAudioClient( NULL ),
4033 renderAudioClient( NULL ),
4034 captureClient( NULL ),
4035 renderClient( NULL ),
4036 captureEvent( NULL ),
4037 renderEvent( NULL ) {}
4040 //=============================================================================
// Constructor: initialize COM for this thread and create the MMDevice
// enumerator used by all device queries.  coInitialized_ records whether
// this object's CoInitialize succeeded so the destructor knows whether to
// balance it with CoUninitialize.
4042 RtApiWasapi::RtApiWasapi()
4043 : coInitialized_( false ), deviceEnumerator_( NULL )
4045 // WASAPI can run either apartment or multi-threaded
4046 HRESULT hr = CoInitialize( NULL );
4047 if ( !FAILED( hr ) )
4048 coInitialized_ = true;
4050 // Instantiate device enumerator
4051 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4052 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4053 ( void** ) &deviceEnumerator_ );
4055 if ( FAILED( hr ) ) {
4056 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4057 error( RtAudioError::DRIVER_ERROR );
4061 //-----------------------------------------------------------------------------
// Destructor: tear down any open stream, release the device enumerator and
// balance the constructor's CoInitialize.  (The statements guarded by the
// two ifs fall outside this view.)
4063 RtApiWasapi::~RtApiWasapi()
4065 if ( stream_.state != STREAM_CLOSED )
4068 SAFE_RELEASE( deviceEnumerator_ );
4070 // If this object previously called CoInitialize()
4071 if ( coInitialized_ )
4075 //=============================================================================
// Return the total number of active WASAPI endpoints: capture devices plus
// render devices.  On any enumeration failure errorText_ is set and a
// DRIVER_ERROR is reported instead of returning a count.
4077 unsigned int RtApiWasapi::getDeviceCount( void )
4079 unsigned int captureDeviceCount = 0;
4080 unsigned int renderDeviceCount = 0;
4082 IMMDeviceCollection* captureDevices = NULL;
4083 IMMDeviceCollection* renderDevices = NULL;
4085 // Count capture devices
4087 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4088 if ( FAILED( hr ) ) {
4089 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4093 hr = captureDevices->GetCount( &captureDeviceCount );
4094 if ( FAILED( hr ) ) {
4095 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4099 // Count render devices
4100 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4101 if ( FAILED( hr ) ) {
4102 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4106 hr = renderDevices->GetCount( &renderDeviceCount );
4107 if ( FAILED( hr ) ) {
4108 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4113 // release all references
4114 SAFE_RELEASE( captureDevices );
4115 SAFE_RELEASE( renderDevices );
// Success only when no error message was recorded along the way.
4117 if ( errorText_.empty() )
4118 return captureDeviceCount + renderDeviceCount;
4120 error( RtAudioError::DRIVER_ERROR );
4124 //-----------------------------------------------------------------------------
// Probe endpoint `device` and return a populated RtAudio::DeviceInfo.
// The combined index space places render devices first ([0,renderCount))
// and capture devices after them.  On any COM failure errorText_ is set;
// the goto/exit lines that skip the remaining probing are not visible in
// this view.  All COM objects and CoTaskMem allocations are released in the
// common cleanup section at the end.
4126 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4128 RtAudio::DeviceInfo info;
4129 unsigned int captureDeviceCount = 0;
4130 unsigned int renderDeviceCount = 0;
4131 std::string defaultDeviceName;
4132 bool isCaptureDevice = false;
4134 PROPVARIANT deviceNameProp;
4135 PROPVARIANT defaultDeviceNameProp;
4137 IMMDeviceCollection* captureDevices = NULL;
4138 IMMDeviceCollection* renderDevices = NULL;
4139 IMMDevice* devicePtr = NULL;
4140 IMMDevice* defaultDevicePtr = NULL;
4141 IAudioClient* audioClient = NULL;
4142 IPropertyStore* devicePropStore = NULL;
4143 IPropertyStore* defaultDevicePropStore = NULL;
4145 WAVEFORMATEX* deviceFormat = NULL;
4146 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistic default: flipped only if probing completes (outside this view).
4149 info.probed = false;
4151 // Count capture devices
4153 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4154 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4155 if ( FAILED( hr ) ) {
4156 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4160 hr = captureDevices->GetCount( &captureDeviceCount );
4161 if ( FAILED( hr ) ) {
4162 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4166 // Count render devices
4167 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4168 if ( FAILED( hr ) ) {
4169 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4173 hr = renderDevices->GetCount( &renderDeviceCount );
4174 if ( FAILED( hr ) ) {
4175 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4179 // validate device index
4180 if ( device >= captureDeviceCount + renderDeviceCount ) {
4181 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4182 errorType = RtAudioError::INVALID_USE;
4186 // determine whether index falls within capture or render devices
4187 if ( device >= renderDeviceCount ) {
4188 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4189 if ( FAILED( hr ) ) {
4190 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4193 isCaptureDevice = true;
4196 hr = renderDevices->Item( device, &devicePtr );
4197 if ( FAILED( hr ) ) {
4198 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4201 isCaptureDevice = false;
4204 // get default device name
4205 if ( isCaptureDevice ) {
4206 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4207 if ( FAILED( hr ) ) {
4208 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4213 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4214 if ( FAILED( hr ) ) {
4215 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4220 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4221 if ( FAILED( hr ) ) {
4222 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4225 PropVariantInit( &defaultDeviceNameProp );
4227 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4228 if ( FAILED( hr ) ) {
4229 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4233 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// Probe this device's friendly name the same way.
4236 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4237 if ( FAILED( hr ) ) {
4238 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4242 PropVariantInit( &deviceNameProp );
4244 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4245 if ( FAILED( hr ) ) {
4246 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4250 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device flags are decided by comparing friendly names, which can
// misfire when two endpoints share a name.
4253 if ( isCaptureDevice ) {
4254 info.isDefaultInput = info.name == defaultDeviceName;
4255 info.isDefaultOutput = false;
4258 info.isDefaultInput = false;
4259 info.isDefaultOutput = info.name == defaultDeviceName;
// Activate an audio client to query the shared-mode mix format.
4263 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4264 if ( FAILED( hr ) ) {
4265 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4269 hr = audioClient->GetMixFormat( &deviceFormat );
4270 if ( FAILED( hr ) ) {
4271 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4275 if ( isCaptureDevice ) {
4276 info.inputChannels = deviceFormat->nChannels;
4277 info.outputChannels = 0;
4278 info.duplexChannels = 0;
4281 info.inputChannels = 0;
4282 info.outputChannels = deviceFormat->nChannels;
4283 info.duplexChannels = 0;
4287 info.sampleRates.clear();
4289 // allow support for all sample rates as we have a built-in sample rate converter
4290 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4291 info.sampleRates.push_back( SAMPLE_RATES[i] );
4293 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Map the mix format (PCM or IEEE float, possibly WAVEFORMATEXTENSIBLE)
// onto the RtAudio native-format bitmask.
4296 info.nativeFormats = 0;
4298 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4299 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4300 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4302 if ( deviceFormat->wBitsPerSample == 32 ) {
4303 info.nativeFormats |= RTAUDIO_FLOAT32;
4305 else if ( deviceFormat->wBitsPerSample == 64 ) {
4306 info.nativeFormats |= RTAUDIO_FLOAT64;
4309 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4310 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4311 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4313 if ( deviceFormat->wBitsPerSample == 8 ) {
4314 info.nativeFormats |= RTAUDIO_SINT8;
4316 else if ( deviceFormat->wBitsPerSample == 16 ) {
4317 info.nativeFormats |= RTAUDIO_SINT16;
4319 else if ( deviceFormat->wBitsPerSample == 24 ) {
4320 info.nativeFormats |= RTAUDIO_SINT24;
4322 else if ( deviceFormat->wBitsPerSample == 32 ) {
4323 info.nativeFormats |= RTAUDIO_SINT32;
4331 // release all references
4332 PropVariantClear( &deviceNameProp );
4333 PropVariantClear( &defaultDeviceNameProp );
4335 SAFE_RELEASE( captureDevices );
4336 SAFE_RELEASE( renderDevices );
4337 SAFE_RELEASE( devicePtr );
4338 SAFE_RELEASE( defaultDevicePtr );
4339 SAFE_RELEASE( audioClient );
4340 SAFE_RELEASE( devicePropStore );
4341 SAFE_RELEASE( defaultDevicePropStore );
4343 CoTaskMemFree( deviceFormat );
4344 CoTaskMemFree( closestMatchFormat );
4346 if ( !errorText_.empty() )
// Return the index of the default render endpoint by linearly probing every
// device.  NOTE(review): this calls getDeviceInfo (a full COM probe) per
// iteration — O(n^2) COM calls; acceptable for enumeration-time use only.
4353 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4355 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4356 if ( getDeviceInfo( i ).isDefaultOutput ) {
4364 //-----------------------------------------------------------------------------
// Return the index of the default capture endpoint; same linear probe as
// getDefaultOutputDevice, keyed on isDefaultInput.
4366 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4368 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4369 if ( getDeviceInfo( i ).isDefaultInput ) {
4377 //-----------------------------------------------------------------------------
// Close the open stream: stop it if running, release all WASAPI COM objects
// and event handles held in the WasapiHandle, free user/device buffers, and
// mark the stream CLOSED.  Calling with no open stream only raises a WARNING.
4379 void RtApiWasapi::closeStream( void )
4381 if ( stream_.state == STREAM_CLOSED ) {
4382 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4383 error( RtAudioError::WARNING );
4387 if ( stream_.state != STREAM_STOPPED )
4390 // clean up stream memory
4391 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4392 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4394 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4395 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are plain Win32 handles, not COM objects: CloseHandle them.
4397 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4398 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4400 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4401 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4403 delete ( WasapiHandle* ) stream_.apiHandle;
4404 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0/1 = output/input).
4406 for ( int i = 0; i < 2; i++ ) {
4407 if ( stream_.userBuffer[i] ) {
4408 free( stream_.userBuffer[i] );
4409 stream_.userBuffer[i] = 0;
4413 if ( stream_.deviceBuffer ) {
4414 free( stream_.deviceBuffer );
4415 stream_.deviceBuffer = 0;
4418 // update stream state
4419 stream_.state = STREAM_CLOSED;
4422 //-----------------------------------------------------------------------------
// Start the stream: flip state to RUNNING and spawn the WASAPI worker
// thread.  The thread is created suspended so its priority can be set before
// it begins executing.
4424 void RtApiWasapi::startStream( void )
4427 RtApi::startStream();
4429 if ( stream_.state == STREAM_RUNNING ) {
4430 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4431 error( RtAudioError::WARNING );
4435 // update stream state
4436 stream_.state = STREAM_RUNNING;
4438 // create WASAPI stream thread
4439 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4441 if ( !stream_.callbackInfo.thread ) {
4442 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4443 error( RtAudioError::THREAD_ERROR );
// Apply the priority chosen in probeDeviceOpen, then let the thread run.
4446 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4447 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4451 //-----------------------------------------------------------------------------
// Stop the stream gracefully: signal the worker thread via STREAM_STOPPING,
// spin until it reports STREAM_STOPPED, let the final buffer drain, then
// stop the WASAPI clients and close the thread handle.
4453 void RtApiWasapi::stopStream( void )
4457 if ( stream_.state == STREAM_STOPPED ) {
4458 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4459 error( RtAudioError::WARNING );
4463 // inform stream thread by setting stream state to STREAM_STOPPING
4464 stream_.state = STREAM_STOPPING;
4466 // wait until stream thread is stopped
// NOTE(review): busy-wait on a plain member written by another thread; no
// visible synchronization here.
4467 while( stream_.state != STREAM_STOPPED ) {
4471 // Wait for the last buffer to play before stopping.
4472 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4474 // stop capture client if applicable
4475 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4476 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4477 if ( FAILED( hr ) ) {
4478 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4479 error( RtAudioError::DRIVER_ERROR );
4484 // stop render client if applicable
4485 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4486 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4487 if ( FAILED( hr ) ) {
4488 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4489 error( RtAudioError::DRIVER_ERROR );
4494 // close thread handle
4495 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4496 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4497 error( RtAudioError::THREAD_ERROR );
4501 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4504 //-----------------------------------------------------------------------------
// Abort the stream: identical to stopStream except the last buffer is NOT
// allowed to drain (no Sleep before stopping the clients).
4506 void RtApiWasapi::abortStream( void )
4510 if ( stream_.state == STREAM_STOPPED ) {
4511 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4512 error( RtAudioError::WARNING );
4516 // inform stream thread by setting stream state to STREAM_STOPPING
4517 stream_.state = STREAM_STOPPING;
4519 // wait until stream thread is stopped
4520 while ( stream_.state != STREAM_STOPPED ) {
4524 // stop capture client if applicable
4525 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4526 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4527 if ( FAILED( hr ) ) {
4528 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4529 error( RtAudioError::DRIVER_ERROR );
4534 // stop render client if applicable
4535 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4536 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4537 if ( FAILED( hr ) ) {
4538 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4539 error( RtAudioError::DRIVER_ERROR );
4544 // close thread handle
4545 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4546 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4547 error( RtAudioError::THREAD_ERROR );
4551 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4554 //-----------------------------------------------------------------------------
// Open one direction (mode = INPUT or OUTPUT) of a stream on endpoint
// `device`: activates an IAudioClient for the endpoint, records stream
// parameters in stream_, configures buffer-conversion flags, and allocates
// the user buffer.  Returns SUCCESS/FAILURE; on failure the stream is torn
// down in the cleanup section at the end.  The goto/exit lines between
// error checks are not visible in this view.
4556 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4557 unsigned int firstChannel, unsigned int sampleRate,
4558 RtAudioFormat format, unsigned int* bufferSize,
4559 RtAudio::StreamOptions* options )
4561 bool methodResult = FAILURE;
4562 unsigned int captureDeviceCount = 0;
4563 unsigned int renderDeviceCount = 0;
4565 IMMDeviceCollection* captureDevices = NULL;
4566 IMMDeviceCollection* renderDevices = NULL;
4567 IMMDevice* devicePtr = NULL;
4568 WAVEFORMATEX* deviceFormat = NULL;
4569 unsigned int bufferBytes;
4570 stream_.state = STREAM_STOPPED;
4572 // create API Handle if not already created
4573 if ( !stream_.apiHandle )
4574 stream_.apiHandle = ( void* ) new WasapiHandle();
4576 // Count capture devices
4578 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4579 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4580 if ( FAILED( hr ) ) {
4581 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4585 hr = captureDevices->GetCount( &captureDeviceCount );
4586 if ( FAILED( hr ) ) {
4587 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4591 // Count render devices
4592 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4593 if ( FAILED( hr ) ) {
4594 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4598 hr = renderDevices->GetCount( &renderDeviceCount );
4599 if ( FAILED( hr ) ) {
4600 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4604 // validate device index
4605 if ( device >= captureDeviceCount + renderDeviceCount ) {
4606 errorType = RtAudioError::INVALID_USE;
4607 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4611 // determine whether index falls within capture or render devices
// Indices >= renderDeviceCount refer to capture endpoints (same combined
// index space as getDeviceInfo); the requested mode must match.
4612 if ( device >= renderDeviceCount ) {
4613 if ( mode != INPUT ) {
4614 errorType = RtAudioError::INVALID_USE;
4615 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4619 // retrieve captureAudioClient from devicePtr
4620 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4622 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4623 if ( FAILED( hr ) ) {
4624 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4628 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4629 NULL, ( void** ) &captureAudioClient );
4630 if ( FAILED( hr ) ) {
4631 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4635 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4636 if ( FAILED( hr ) ) {
4637 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4641 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4642 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4645 if ( mode != OUTPUT ) {
4646 errorType = RtAudioError::INVALID_USE;
4647 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4651 // retrieve renderAudioClient from devicePtr
4652 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4654 hr = renderDevices->Item( device, &devicePtr );
4655 if ( FAILED( hr ) ) {
4656 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4660 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4661 NULL, ( void** ) &renderAudioClient );
4662 if ( FAILED( hr ) ) {
4663 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4667 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4668 if ( FAILED( hr ) ) {
4669 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4673 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4674 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Opening the second direction of an already-open stream promotes it to
// DUPLEX.
4678 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4679 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4680 stream_.mode = DUPLEX;
4683 stream_.mode = mode;
// Record the stream parameters the worker thread and converters rely on.
4686 stream_.device[mode] = device;
4687 stream_.doByteSwap[mode] = false;
4688 stream_.sampleRate = sampleRate;
4689 stream_.bufferSize = *bufferSize;
4690 stream_.nBuffers = 1;
4691 stream_.nUserChannels[mode] = channels;
4692 stream_.channelOffset[mode] = firstChannel;
4693 stream_.userFormat = format;
4694 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4696 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4697 stream_.userInterleaved = false;
4699 stream_.userInterleaved = true;
4700 stream_.deviceInterleaved[mode] = true;
4702 // Set flags for buffer conversion.
4703 stream_.doConvertBuffer[mode] = false;
// NOTE(review): nUserChannels and nDeviceChannels are arrays, so this
// comparison tests their (always-distinct) addresses and is always true,
// which forces doConvertBuffer on — the intent was likely
// stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode].  Confirm
// before changing behavior.
4704 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4705 stream_.nUserChannels != stream_.nDeviceChannels )
4706 stream_.doConvertBuffer[mode] = true;
4707 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4708 stream_.nUserChannels[mode] > 1 )
4709 stream_.doConvertBuffer[mode] = true;
4711 if ( stream_.doConvertBuffer[mode] )
4712 setConvertInfo( mode, 0 );
4714 // Allocate necessary internal buffers
4715 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4717 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4718 if ( !stream_.userBuffer[mode] ) {
4719 errorType = RtAudioError::MEMORY_ERROR;
4720 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// Thread priority used by startStream's SetThreadPriority call.
4724 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4725 stream_.callbackInfo.priority = 15;
4727 stream_.callbackInfo.priority = 0;
4729 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4730 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4732 methodResult = SUCCESS;
// Common cleanup: release enumeration objects and the mix format.
4736 SAFE_RELEASE( captureDevices );
4737 SAFE_RELEASE( renderDevices );
4738 SAFE_RELEASE( devicePtr );
4739 CoTaskMemFree( deviceFormat );
4741 // if method failed, close the stream
4742 if ( methodResult == FAILURE )
4745 if ( !errorText_.empty() )
4747 return methodResult;
4750 //=============================================================================
// Static thread entry point for CreateThread: recover the RtApiWasapi
// instance from the void* context and run its member worker loop.
4752 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4755 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static trampoline: forward to the instance's stopStream() (graceful stop).
4760 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4763 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static trampoline: forward to the instance's abortStream() (no drain).
4768 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4771 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4776 //-----------------------------------------------------------------------------
// Core WASAPI stream-processing loop, executed on a dedicated thread (see
// runWasapiThread).  Per iteration it: (1) pulls converted input from the
// capture ring buffer, (2) invokes the user callback, (3) pushes converted
// output to the render ring buffer, then (4)/(5) services the WASAPI capture
// and render endpoints.  Runs until stream_.state becomes STREAM_STOPPING.
// NOTE(review): this listing is elided (source line numbers skip), so many
// closing braces, `goto Exit`-style error paths, and some declarations (e.g.
// HRESULT hr) are not visible here.
4778 void RtApiWasapi::wasapiThread()
4780 // as this is a new thread, we must CoInitialize it
4781 CoInitialize( NULL );
// Fetch the per-stream WASAPI objects stashed in the API handle.  Either the
// capture or render side (or both, for DUPLEX) may be present.
4785 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4786 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4787 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4788 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4789 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4790 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
// Device mix formats and device-rate/user-rate ratios, filled in below once
// the corresponding client exists.
4792 WAVEFORMATEX* captureFormat = NULL;
4793 WAVEFORMATEX* renderFormat = NULL;
4794 float captureSrRatio = 0.0f;
4795 float renderSrRatio = 0.0f;
4796 WasapiBuffer captureBuffer;
4797 WasapiBuffer renderBuffer;
4799 // declare local stream variables
4800 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4801 BYTE* streamBuffer = NULL;
4802 unsigned long captureFlags = 0;
4803 unsigned int bufferFrameCount = 0;
4804 unsigned int numFramesPadding = 0;
// convBufferSize is populated elsewhere (presumably as an out-value of the
// sample-rate conversion on an elided line) — confirm against full source.
4805 unsigned int convBufferSize = 0;
// Handshake flags between the callback phase and the WASAPI service phases.
4806 bool callbackPushed = false;
4807 bool callbackPulled = false;
4808 bool callbackStopped = false;
4809 int callbackResult = 0;
4811 // convBuffer is used to store converted buffers between WASAPI and the user
4812 char* convBuffer = NULL;
4813 unsigned int convBuffSize = 0;
4814 unsigned int deviceBuffSize = 0;
4817 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4819 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): the LoadLibrary result is used without a NULL check in the
// visible lines; if AVRT.dll is missing, GetProcAddress would be called with
// a NULL module — verify the elided lines guard this.
4820 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4822 DWORD taskIndex = 0;
4823 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4824 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4825 FreeLibrary( AvrtDll );
4828 // start capture stream if applicable
4829 if ( captureAudioClient ) {
4830 hr = captureAudioClient->GetMixFormat( &captureFormat );
4831 if ( FAILED( hr ) ) {
4832 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
// Ratio of device sample rate to user sample rate; used to size conversion
// buffers so a user-rate buffer maps onto the device rate.
4836 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4838 // initialize capture stream according to desired buffer size
4839 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
// REFERENCE_TIME is in 100-ns units, hence the factor of 10,000,000.
4840 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
// Only initialize the client on the first pass; on a restarted stream the
// capture client from the previous run is reused.
4842 if ( !captureClient ) {
4843 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4844 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4845 desiredBufferPeriod,
4846 desiredBufferPeriod,
4849 if ( FAILED( hr ) ) {
4850 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4854 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4855 ( void** ) &captureClient );
4856 if ( FAILED( hr ) ) {
4857 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4861 // configure captureEvent to trigger on every available capture buffer
4862 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4863 if ( !captureEvent ) {
4864 errorType = RtAudioError::SYSTEM_ERROR;
4865 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4869 hr = captureAudioClient->SetEventHandle( captureEvent );
4870 if ( FAILED( hr ) ) {
4871 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the newly created client/event back into the stream's API handle
// so stopStream()/closeStream() can release them.
4875 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4876 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4879 unsigned int inBufferSize = 0;
4880 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4881 if ( FAILED( hr ) ) {
4882 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4886 // scale outBufferSize according to stream->user sample rate ratio
// Both sizes below are in samples (frames * channels), not bytes.
4887 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4888 inBufferSize *= stream_.nDeviceChannels[INPUT];
4890 // set captureBuffer size
// The ring buffer must hold one full device period plus one user period so
// producer (WASAPI) and consumer (callback) never collide.
4891 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4893 // reset the capture stream
4894 hr = captureAudioClient->Reset();
4895 if ( FAILED( hr ) ) {
4896 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4900 // start the capture stream
4901 hr = captureAudioClient->Start();
4902 if ( FAILED( hr ) ) {
4903 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4908 // start render stream if applicable
// Mirror of the capture setup above, for the output (render) endpoint.
4909 if ( renderAudioClient ) {
4910 hr = renderAudioClient->GetMixFormat( &renderFormat );
4911 if ( FAILED( hr ) ) {
4912 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4916 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4918 // initialize render stream according to desired buffer size
4919 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4920 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4922 if ( !renderClient ) {
4923 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4924 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4925 desiredBufferPeriod,
4926 desiredBufferPeriod,
4929 if ( FAILED( hr ) ) {
4930 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4934 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4935 ( void** ) &renderClient );
4936 if ( FAILED( hr ) ) {
4937 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4941 // configure renderEvent to trigger on every available render buffer
4942 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4943 if ( !renderEvent ) {
4944 errorType = RtAudioError::SYSTEM_ERROR;
4945 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4949 hr = renderAudioClient->SetEventHandle( renderEvent );
4950 if ( FAILED( hr ) ) {
4951 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4955 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4956 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4959 unsigned int outBufferSize = 0;
4960 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4961 if ( FAILED( hr ) ) {
4962 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4966 // scale inBufferSize according to user->stream sample rate ratio
4967 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
4968 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4970 // set renderBuffer size
4971 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4973 // reset the render stream
4974 hr = renderAudioClient->Reset();
4975 if ( FAILED( hr ) ) {
4976 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4980 // start the render stream
4981 hr = renderAudioClient->Start();
4982 if ( FAILED( hr ) ) {
4983 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
// Size the scratch conversion buffer and the shared device buffer for the
// active stream mode; DUPLEX takes the max of the two directions so one
// allocation serves both.
4988 if ( stream_.mode == INPUT ) {
4989 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4990 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4992 else if ( stream_.mode == OUTPUT ) {
4993 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4994 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4996 else if ( stream_.mode == DUPLEX ) {
4997 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4998 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4999 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5000 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5003 convBuffer = ( char* ) malloc( convBuffSize );
5004 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5005 if ( !convBuffer || !stream_.deviceBuffer ) {
5006 errorType = RtAudioError::MEMORY_ERROR;
5007 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5011 // stream process loop
5012 while ( stream_.state != STREAM_STOPPING ) {
// Phase 1: obtain a full user-rate input buffer (or skip if output-only).
5013 if ( !callbackPulled ) {
5016 // 1. Pull callback buffer from inputBuffer
5017 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5018 // Convert callback buffer to user format
5020 if ( captureAudioClient ) {
5021 // Pull callback buffer from inputBuffer
// callbackPulled stays false until a full device-rate period is available,
// in which case we wait on captureEvent in phase 4 below.
5022 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5023 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
5024 stream_.deviceFormat[INPUT] );
5026 if ( callbackPulled ) {
5027 // Convert callback buffer to user sample rate
5028 convertBufferWasapi( stream_.deviceBuffer,
5030 stream_.nDeviceChannels[INPUT],
5031 captureFormat->nSamplesPerSec,
5033 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
5035 stream_.deviceFormat[INPUT] );
5037 if ( stream_.doConvertBuffer[INPUT] ) {
5038 // Convert callback buffer to user format
5039 convertBuffer( stream_.userBuffer[INPUT],
5040 stream_.deviceBuffer,
5041 stream_.convertInfo[INPUT] );
5044 // no further conversion, simple copy deviceBuffer to userBuffer
5045 memcpy( stream_.userBuffer[INPUT],
5046 stream_.deviceBuffer,
5047 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5052 // if there is no capture stream, set callbackPulled flag
5053 callbackPulled = true;
// Phase 2: run the user callback once per user-rate buffer.
5058 // 1. Execute user callback method
5059 // 2. Handle return value from callback
5061 // if callback has not requested the stream to stop
5062 if ( callbackPulled && !callbackStopped ) {
5063 // Execute user callback method
5064 callbackResult = callback( stream_.userBuffer[OUTPUT],
5065 stream_.userBuffer[INPUT],
5068 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5069 stream_.callbackInfo.userData );
5071 // Handle return value from callback
// 1 == drain and stop; a separate thread is spawned because stopStream()
// joins this thread and would deadlock if called directly from here.
5072 if ( callbackResult == 1 ) {
5073 // instantiate a thread to stop this thread
5074 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5075 if ( !threadHandle ) {
5076 errorType = RtAudioError::THREAD_ERROR;
5077 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5080 else if ( !CloseHandle( threadHandle ) ) {
5081 errorType = RtAudioError::THREAD_ERROR;
5082 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5086 callbackStopped = true;
// 2 == abort immediately, same spawn-a-helper-thread pattern.
5088 else if ( callbackResult == 2 ) {
5089 // instantiate a thread to stop this thread
5090 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5091 if ( !threadHandle ) {
5092 errorType = RtAudioError::THREAD_ERROR;
5093 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5096 else if ( !CloseHandle( threadHandle ) ) {
5097 errorType = RtAudioError::THREAD_ERROR;
5098 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5102 callbackStopped = true;
// Phase 3: convert the callback's output and queue it for the render device.
5109 // 1. Convert callback buffer to stream format
5110 // 2. Convert callback buffer to stream sample rate and channel count
5111 // 3. Push callback buffer into outputBuffer
5113 if ( renderAudioClient && callbackPulled ) {
5114 if ( stream_.doConvertBuffer[OUTPUT] ) {
5115 // Convert callback buffer to stream format
5116 convertBuffer( stream_.deviceBuffer,
5117 stream_.userBuffer[OUTPUT],
5118 stream_.convertInfo[OUTPUT] );
5122 // Convert callback buffer to stream sample rate
5123 convertBufferWasapi( convBuffer,
5124 stream_.deviceBuffer,
5125 stream_.nDeviceChannels[OUTPUT],
5127 renderFormat->nSamplesPerSec,
5130 stream_.deviceFormat[OUTPUT] );
5132 // Push callback buffer into outputBuffer
// callbackPushed is false when the ring buffer is too full; phase 5 then
// waits on renderEvent before retrying on the next loop pass.
5133 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5134 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5135 stream_.deviceFormat[OUTPUT] );
5138 // if there is no render stream, set callbackPushed flag
5139 callbackPushed = true;
// Phase 4: service the WASAPI capture endpoint (producer side of
// captureBuffer).
5144 // 1. Get capture buffer from stream
5145 // 2. Push capture buffer into inputBuffer
5146 // 3. If 2. was successful: Release capture buffer
5148 if ( captureAudioClient ) {
5149 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5150 if ( !callbackPulled ) {
5151 WaitForSingleObject( captureEvent, INFINITE );
5154 // Get capture buffer from stream
5155 hr = captureClient->GetBuffer( &streamBuffer,
5157 &captureFlags, NULL, NULL );
5158 if ( FAILED( hr ) ) {
5159 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5163 if ( bufferFrameCount != 0 ) {
5164 // Push capture buffer into inputBuffer
5165 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5166 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5167 stream_.deviceFormat[INPUT] ) )
5169 // Release capture buffer
5170 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5171 if ( FAILED( hr ) ) {
5172 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
// Ring buffer full: release zero frames so WASAPI keeps the data for the
// next pass instead of dropping it.
5178 // Inform WASAPI that capture was unsuccessful
5179 hr = captureClient->ReleaseBuffer( 0 );
5180 if ( FAILED( hr ) ) {
5181 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5188 // Inform WASAPI that capture was unsuccessful
5189 hr = captureClient->ReleaseBuffer( 0 );
5190 if ( FAILED( hr ) ) {
5191 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
// Phase 5: service the WASAPI render endpoint (consumer side of
// renderBuffer).
5199 // 1. Get render buffer from stream
5200 // 2. Pull next buffer from outputBuffer
5201 // 3. If 2. was successful: Fill render buffer with next buffer
5202 // Release render buffer
5204 if ( renderAudioClient ) {
5205 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5206 if ( callbackPulled && !callbackPushed ) {
5207 WaitForSingleObject( renderEvent, INFINITE );
5210 // Get render buffer from stream
5211 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5212 if ( FAILED( hr ) ) {
5213 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5217 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5218 if ( FAILED( hr ) ) {
5219 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total buffer minus frames still queued for playback.
5223 bufferFrameCount -= numFramesPadding;
5225 if ( bufferFrameCount != 0 ) {
5226 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5227 if ( FAILED( hr ) ) {
5228 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5232 // Pull next buffer from outputBuffer
5233 // Fill render buffer with next buffer
5234 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5235 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5236 stream_.deviceFormat[OUTPUT] ) )
5238 // Release render buffer
5239 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5240 if ( FAILED( hr ) ) {
5241 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5247 // Inform WASAPI that render was unsuccessful
5248 hr = renderClient->ReleaseBuffer( 0, 0 );
5249 if ( FAILED( hr ) ) {
5250 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5257 // Inform WASAPI that render was unsuccessful
5258 hr = renderClient->ReleaseBuffer( 0, 0 );
5259 if ( FAILED( hr ) ) {
5260 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5266 // if the callback buffer was pushed to renderBuffer, reset callbackPulled flag
// A completed push marks the end of one full user-buffer cycle, so advance
// the stream clock.
5267 if ( callbackPushed ) {
5268 callbackPulled = false;
5270 RtApi::tickStreamTime();
// Cleanup (reached on normal stop and, via elided gotos, on error).  The mix
// formats were allocated by GetMixFormat and must go back via CoTaskMemFree.
5277 CoTaskMemFree( captureFormat );
5278 CoTaskMemFree( renderFormat );
5280 free ( convBuffer );
5284 // update stream state
5285 stream_.state = STREAM_STOPPED;
5287 if ( errorText_.empty() )
5293 //******************** End of __WINDOWS_WASAPI__ *********************//
5297 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5299 // Modified by Robin Davies, October 2005
5300 // - Improvements to DirectX pointer chasing.
5301 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5302 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5303 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5304 // Changed device query structure for RtAudio 4.0.7, January 2010
5306 #include <mmsystem.h>
5310 #include <algorithm>
5312 #if defined(__MINGW32__)
5313 // missing from latest mingw winapi
5314 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5315 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5316 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5317 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5320 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5322 #ifdef _MSC_VER // if Microsoft Visual C++
5323 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
// Returns nonzero when `pointer` lies within the half-open window
// [earlierPointer, laterPointer) of a circular DirectSound buffer of
// `bufferSize` bytes.  All arguments are byte offsets; the window and the
// probe point are "unwrapped" past bufferSize so an ordinary range
// comparison works across the wrap boundary.
5326 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5328 if ( pointer > bufferSize ) pointer -= bufferSize; // normalize into [0, bufferSize]
5329 if ( laterPointer < earlierPointer ) laterPointer += bufferSize; // unwrap window end
5330 if ( pointer < earlierPointer ) pointer += bufferSize; // unwrap probe point
5331 return pointer >= earlierPointer && pointer < laterPointer;
5334 // A structure to hold various information related to the DirectSound
5335 // API implementation.
// NOTE(review): the struct header and several members (id, buffer, xrun —
// all referenced by the constructor below) are on elided lines.  Index [0]
// is the OUTPUT device, [1] the INPUT device, matching RtAudio's StreamMode
// indexing used elsewhere in this file.
5337 unsigned int drainCounter; // Tracks callback counts when draining
5338 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Current write/read byte offset into each DS buffer.
5342 UINT bufferPointer[2];
// Total size in bytes of each DirectSound buffer.
5343 DWORD dsBufferSize[2];
5344 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default-construct with everything zeroed/cleared.
5348 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5351 // Declarations for utility functions, callbacks, and structures
5352 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate; fills a
// DsProbeData-supplied device vector (remaining parameters elided here).
5353 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5354 LPCTSTR description,
// Maps a DirectSound HRESULT code to a human-readable string.
5358 static const char* getErrorString( int code );
// Entry point for the DirectSound callback/service thread (_beginthreadex).
5360 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default constructor (struct header elided): not yet found by the
// current enumeration pass, and neither output [0] nor input [1] id valid.
5369 : found(false) { validId[0] = false; validId[1] = false; }
// Context passed through DirectSound(Capture)Enumerate to deviceQueryCallback:
// the device list being (re)built plus a direction flag (isInput, declared on
// an elided line) distinguishing output from capture enumeration.
5372 struct DsProbeData {
5374 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread.  CoUninitialize is only
// balanced in the destructor when our CoInitialize actually succeeded.
5377 RtApiDs :: RtApiDs()
5379 // Dsound will run both-threaded. If CoInitialize fails, then just
5380 // accept whatever the mainline chose for a threading model.
5381 coInitialized_ = false;
5382 HRESULT hr = CoInitialize( NULL );
5383 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any open stream, then undo the constructor's
// CoInitialize if (and only if) it succeeded.
5386 RtApiDs :: ~RtApiDs()
5388 if ( stream_.state != STREAM_CLOSED ) closeStream();
5389 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5392 // The DirectSound default output is always the first device.
// NOTE(review): body elided from this listing — presumably `return 0;`
// per the comment above; confirm against the full source.
5393 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5398 // The DirectSound default input is always the first input device,
5399 // which is the first capture device enumerated.
// NOTE(review): body elided from this listing — presumably `return 0;`
// per the comment above; confirm against the full source.
5400 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerates DirectSound output and capture devices, refreshing the
// cached dsDevices vector (devices that vanished are pruned), and returns
// the resulting device count.  Enumeration failures are reported as
// warnings rather than aborting, so a partial count can still be returned.
5405 unsigned int RtApiDs :: getDeviceCount( void )
5407 // Set query flag for previously found devices to false, so that we
5408 // can check for any devices that have disappeared.
5409 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5410 dsDevices[i].found = false;
5412 // Query DirectSound devices.
5413 struct DsProbeData probeInfo;
5414 probeInfo.isInput = false;
5415 probeInfo.dsDevices = &dsDevices;
// deviceQueryCallback marks matching entries found=true and appends new ones.
5416 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5417 if ( FAILED( result ) ) {
5418 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5419 errorText_ = errorStream_.str();
5420 error( RtAudioError::WARNING );
5423 // Query DirectSoundCapture devices.
5424 probeInfo.isInput = true;
5425 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5426 if ( FAILED( result ) ) {
5427 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5428 errorText_ = errorStream_.str();
5429 error( RtAudioError::WARNING );
5432 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: `i` is only advanced on an elided line when no erase occurs, so
// indices remain valid while erasing in place.
5433 for ( unsigned int i=0; i<dsDevices.size(); ) {
5434 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5438 return static_cast<unsigned int>(dsDevices.size());
// Probes device `device` and returns a populated RtAudio::DeviceInfo:
// output capabilities via a temporary DirectSound object, then input
// capabilities via a temporary DirectSoundCapture object (the `probeInput`
// label targeted by the goto below is on an elided line).  Failures are
// reported as warnings and return a partially filled info (probed=false
// unless the elided success paths set it).
5441 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5443 RtAudio::DeviceInfo info;
5444 info.probed = false;
// Lazily (re)build the device list if nobody has enumerated yet.
5446 if ( dsDevices.size() == 0 ) {
5447 // Force a query of all devices
5449 if ( dsDevices.size() == 0 ) {
5450 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5451 error( RtAudioError::INVALID_USE );
5456 if ( device >= dsDevices.size() ) {
5457 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5458 error( RtAudioError::INVALID_USE );
// No output id for this device: skip straight to the capture probe.
5463 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5465 LPDIRECTSOUND output;
5467 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5468 if ( FAILED( result ) ) {
5469 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5470 errorText_ = errorStream_.str();
5471 error( RtAudioError::WARNING );
// DSCAPS requires dwSize to be set before the GetCaps call.
5475 outCaps.dwSize = sizeof( outCaps );
5476 result = output->GetCaps( &outCaps );
5477 if ( FAILED( result ) ) {
5479 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5480 errorText_ = errorStream_.str();
5481 error( RtAudioError::WARNING );
5485 // Get output channel information.
5486 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5488 // Get sample rate information.
// Keep every supported rate within the device's secondary-buffer range;
// prefer the highest rate that does not exceed 48 kHz.
5489 info.sampleRates.clear();
5490 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5491 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5492 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5493 info.sampleRates.push_back( SAMPLE_RATES[k] );
5495 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5496 info.preferredSampleRate = SAMPLE_RATES[k];
5500 // Get format information.
5501 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5502 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5506 if ( getDefaultOutputDevice() == device )
5507 info.isDefaultOutput = true;
// No capture id: device is output-only, finish up with its name.
5509 if ( dsDevices[ device ].validId[1] == false ) {
5510 info.name = dsDevices[ device ].name;
// --- input (capture) probe; `probeInput:` label is on an elided line ---
5517 LPDIRECTSOUNDCAPTURE input;
5518 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5519 if ( FAILED( result ) ) {
5520 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5521 errorText_ = errorStream_.str();
5522 error( RtAudioError::WARNING );
5527 inCaps.dwSize = sizeof( inCaps );
5528 result = input->GetCaps( &inCaps );
5529 if ( FAILED( result ) ) {
5531 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5532 errorText_ = errorStream_.str();
5533 error( RtAudioError::WARNING );
5537 // Get input channel information.
5538 info.inputChannels = inCaps.dwChannels;
5540 // Get sample rate and format information.
// dwFormats is a WAVE_FORMAT_* bitmask; the 1/2/4/96 prefixes encode the
// rate (11.025/22.05/44.1/96 kHz), S/M stereo vs. mono, 08/16 sample width.
5541 std::vector<unsigned int> rates;
5542 if ( inCaps.dwChannels >= 2 ) {
5543 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5544 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5545 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5546 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5547 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5548 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5549 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5550 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
// Collect rates for the preferred (16-bit first) native format only.
5552 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5553 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5554 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5555 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5556 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5558 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5559 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5560 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5561 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5562 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5565 else if ( inCaps.dwChannels == 1 ) {
5566 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5567 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5568 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5569 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5570 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5571 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5572 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5573 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5575 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5576 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5577 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5578 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5579 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5581 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5582 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5583 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5584 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5585 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5588 else info.inputChannels = 0; // technically, this would be an error
5592 if ( info.inputChannels == 0 ) return info;
5594 // Copy the supported rates to the info structure but avoid duplication.
// `found` is declared on an elided line; inner loop sets it when rates[i]
// already exists in info.sampleRates.
5596 for ( unsigned int i=0; i<rates.size(); i++ ) {
5598 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5599 if ( rates[i] == info.sampleRates[j] ) {
5604 if ( found == false ) info.sampleRates.push_back( rates[i] );
5606 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5608 // If device opens for both playback and capture, we determine the channels.
5609 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5610 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// DirectSound's default capture device is always enumerated first.
5612 if ( device == 0 ) info.isDefaultInput = true;
5614 // Copy name and return.
5615 info.name = dsDevices[ device ].name;
5620 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5621 unsigned int firstChannel, unsigned int sampleRate,
5622 RtAudioFormat format, unsigned int *bufferSize,
5623 RtAudio::StreamOptions *options )
5625 if ( channels + firstChannel > 2 ) {
5626 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5630 size_t nDevices = dsDevices.size();
5631 if ( nDevices == 0 ) {
5632 // This should not happen because a check is made before this function is called.
5633 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5637 if ( device >= nDevices ) {
5638 // This should not happen because a check is made before this function is called.
5639 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5643 if ( mode == OUTPUT ) {
5644 if ( dsDevices[ device ].validId[0] == false ) {
5645 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5646 errorText_ = errorStream_.str();
5650 else { // mode == INPUT
5651 if ( dsDevices[ device ].validId[1] == false ) {
5652 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5653 errorText_ = errorStream_.str();
5658 // According to a note in PortAudio, using GetDesktopWindow()
5659 // instead of GetForegroundWindow() is supposed to avoid problems
5660 // that occur when the application's window is not the foreground
5661 // window. Also, if the application window closes before the
5662 // DirectSound buffer, DirectSound can crash. In the past, I had
5663 // problems when using GetDesktopWindow() but it seems fine now
5664 // (January 2010). I'll leave it commented here.
5665 // HWND hWnd = GetForegroundWindow();
5666 HWND hWnd = GetDesktopWindow();
5668 // Check the numberOfBuffers parameter and limit the lowest value to
5669 // two. This is a judgement call and a value of two is probably too
5670 // low for capture, but it should work for playback.
5672 if ( options ) nBuffers = options->numberOfBuffers;
5673 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5674 if ( nBuffers < 2 ) nBuffers = 3;
5676 // Check the lower range of the user-specified buffer size and set
5677 // (arbitrarily) to a lower bound of 32.
5678 if ( *bufferSize < 32 ) *bufferSize = 32;
5680 // Create the wave format structure. The data format setting will
5681 // be determined later.
5682 WAVEFORMATEX waveFormat;
5683 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5684 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5685 waveFormat.nChannels = channels + firstChannel;
5686 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5688 // Determine the device buffer size. By default, we'll use the value
5689 // defined above (32K), but we will grow it to make allowances for
5690 // very large software buffer sizes.
5691 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5692 DWORD dsPointerLeadTime = 0;
5694 void *ohandle = 0, *bhandle = 0;
5696 if ( mode == OUTPUT ) {
5698 LPDIRECTSOUND output;
5699 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5700 if ( FAILED( result ) ) {
5701 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5702 errorText_ = errorStream_.str();
5707 outCaps.dwSize = sizeof( outCaps );
5708 result = output->GetCaps( &outCaps );
5709 if ( FAILED( result ) ) {
5711 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5712 errorText_ = errorStream_.str();
5716 // Check channel information.
5717 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5718 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5719 errorText_ = errorStream_.str();
5723 // Check format information. Use 16-bit format unless not
5724 // supported or user requests 8-bit.
5725 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5726 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5727 waveFormat.wBitsPerSample = 16;
5728 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5731 waveFormat.wBitsPerSample = 8;
5732 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5734 stream_.userFormat = format;
5736 // Update wave format structure and buffer information.
5737 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5738 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5739 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5741 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5742 while ( dsPointerLeadTime * 2U > dsBufferSize )
5745 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5746 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5747 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5748 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5749 if ( FAILED( result ) ) {
5751 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5752 errorText_ = errorStream_.str();
5756 // Even though we will write to the secondary buffer, we need to
5757 // access the primary buffer to set the correct output format
5758 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5759 // buffer description.
5760 DSBUFFERDESC bufferDescription;
5761 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5762 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5763 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5765 // Obtain the primary buffer
5766 LPDIRECTSOUNDBUFFER buffer;
5767 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5768 if ( FAILED( result ) ) {
5770 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5771 errorText_ = errorStream_.str();
5775 // Set the primary DS buffer sound format.
5776 result = buffer->SetFormat( &waveFormat );
5777 if ( FAILED( result ) ) {
5779 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5780 errorText_ = errorStream_.str();
5784 // Setup the secondary DS buffer description.
5785 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5786 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5787 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5788 DSBCAPS_GLOBALFOCUS |
5789 DSBCAPS_GETCURRENTPOSITION2 |
5790 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5791 bufferDescription.dwBufferBytes = dsBufferSize;
5792 bufferDescription.lpwfxFormat = &waveFormat;
5794 // Try to create the secondary DS buffer. If that doesn't work,
5795 // try to use software mixing. Otherwise, there's a problem.
5796 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5797 if ( FAILED( result ) ) {
5798 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5799 DSBCAPS_GLOBALFOCUS |
5800 DSBCAPS_GETCURRENTPOSITION2 |
5801 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5802 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5803 if ( FAILED( result ) ) {
5805 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5806 errorText_ = errorStream_.str();
5811 // Get the buffer size ... might be different from what we specified.
5813 dsbcaps.dwSize = sizeof( DSBCAPS );
5814 result = buffer->GetCaps( &dsbcaps );
5815 if ( FAILED( result ) ) {
5818 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5819 errorText_ = errorStream_.str();
5823 dsBufferSize = dsbcaps.dwBufferBytes;
5825 // Lock the DS buffer
5828 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5829 if ( FAILED( result ) ) {
5832 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5833 errorText_ = errorStream_.str();
5837 // Zero the DS buffer
5838 ZeroMemory( audioPtr, dataLen );
5840 // Unlock the DS buffer
5841 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5842 if ( FAILED( result ) ) {
5845 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5846 errorText_ = errorStream_.str();
5850 ohandle = (void *) output;
5851 bhandle = (void *) buffer;
5854 if ( mode == INPUT ) {
5856 LPDIRECTSOUNDCAPTURE input;
5857 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5858 if ( FAILED( result ) ) {
5859 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5860 errorText_ = errorStream_.str();
5865 inCaps.dwSize = sizeof( inCaps );
5866 result = input->GetCaps( &inCaps );
5867 if ( FAILED( result ) ) {
5869 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5870 errorText_ = errorStream_.str();
5874 // Check channel information.
5875 if ( inCaps.dwChannels < channels + firstChannel ) {
5876 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5880 // Check format information. Use 16-bit format unless user
5882 DWORD deviceFormats;
5883 if ( channels + firstChannel == 2 ) {
5884 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5885 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5886 waveFormat.wBitsPerSample = 8;
5887 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5889 else { // assume 16-bit is supported
5890 waveFormat.wBitsPerSample = 16;
5891 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5894 else { // channel == 1
5895 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5896 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5897 waveFormat.wBitsPerSample = 8;
5898 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5900 else { // assume 16-bit is supported
5901 waveFormat.wBitsPerSample = 16;
5902 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5905 stream_.userFormat = format;
5907 // Update wave format structure and buffer information.
5908 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5909 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5910 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5912 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5913 while ( dsPointerLeadTime * 2U > dsBufferSize )
5916 // Setup the secondary DS buffer description.
5917 DSCBUFFERDESC bufferDescription;
5918 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5919 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5920 bufferDescription.dwFlags = 0;
5921 bufferDescription.dwReserved = 0;
5922 bufferDescription.dwBufferBytes = dsBufferSize;
5923 bufferDescription.lpwfxFormat = &waveFormat;
5925 // Create the capture buffer.
5926 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5927 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5928 if ( FAILED( result ) ) {
5930 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5931 errorText_ = errorStream_.str();
5935 // Get the buffer size ... might be different from what we specified.
5937 dscbcaps.dwSize = sizeof( DSCBCAPS );
5938 result = buffer->GetCaps( &dscbcaps );
5939 if ( FAILED( result ) ) {
5942 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5943 errorText_ = errorStream_.str();
5947 dsBufferSize = dscbcaps.dwBufferBytes;
5949 // NOTE: We could have a problem here if this is a duplex stream
5950 // and the play and capture hardware buffer sizes are different
5951 // (I'm actually not sure if that is a problem or not).
5952 // Currently, we are not verifying that.
5954 // Lock the capture buffer
5957 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5958 if ( FAILED( result ) ) {
5961 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5962 errorText_ = errorStream_.str();
5967 ZeroMemory( audioPtr, dataLen );
5969 // Unlock the buffer
5970 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5971 if ( FAILED( result ) ) {
5974 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5975 errorText_ = errorStream_.str();
5979 ohandle = (void *) input;
5980 bhandle = (void *) buffer;
5983 // Set various stream parameters
5984 DsHandle *handle = 0;
5985 stream_.nDeviceChannels[mode] = channels + firstChannel;
5986 stream_.nUserChannels[mode] = channels;
5987 stream_.bufferSize = *bufferSize;
5988 stream_.channelOffset[mode] = firstChannel;
5989 stream_.deviceInterleaved[mode] = true;
5990 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5991 else stream_.userInterleaved = true;
5993 // Set flag for buffer conversion
5994 stream_.doConvertBuffer[mode] = false;
5995 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5996 stream_.doConvertBuffer[mode] = true;
5997 if (stream_.userFormat != stream_.deviceFormat[mode])
5998 stream_.doConvertBuffer[mode] = true;
5999 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6000 stream_.nUserChannels[mode] > 1 )
6001 stream_.doConvertBuffer[mode] = true;
6003 // Allocate necessary internal buffers
6004 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6005 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6006 if ( stream_.userBuffer[mode] == NULL ) {
6007 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6011 if ( stream_.doConvertBuffer[mode] ) {
6013 bool makeBuffer = true;
6014 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6015 if ( mode == INPUT ) {
6016 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6017 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6018 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6023 bufferBytes *= *bufferSize;
6024 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6025 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6026 if ( stream_.deviceBuffer == NULL ) {
6027 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6033 // Allocate our DsHandle structures for the stream.
6034 if ( stream_.apiHandle == 0 ) {
6036 handle = new DsHandle;
6038 catch ( std::bad_alloc& ) {
6039 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6043 // Create a manual-reset event.
6044 handle->condition = CreateEvent( NULL, // no security
6045 TRUE, // manual-reset
6046 FALSE, // non-signaled initially
6048 stream_.apiHandle = (void *) handle;
6051 handle = (DsHandle *) stream_.apiHandle;
6052 handle->id[mode] = ohandle;
6053 handle->buffer[mode] = bhandle;
6054 handle->dsBufferSize[mode] = dsBufferSize;
6055 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6057 stream_.device[mode] = device;
6058 stream_.state = STREAM_STOPPED;
6059 if ( stream_.mode == OUTPUT && mode == INPUT )
6060 // We had already set up an output stream.
6061 stream_.mode = DUPLEX;
6063 stream_.mode = mode;
6064 stream_.nBuffers = nBuffers;
6065 stream_.sampleRate = sampleRate;
6067 // Setup the buffer conversion information structure.
6068 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6070 // Setup the callback thread.
6071 if ( stream_.callbackInfo.isRunning == false ) {
6073 stream_.callbackInfo.isRunning = true;
6074 stream_.callbackInfo.object = (void *) this;
6075 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6076 &stream_.callbackInfo, 0, &threadId );
6077 if ( stream_.callbackInfo.thread == 0 ) {
6078 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6082 // Boost DS thread priority
6083 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6089 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6090 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6091 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6092 if ( buffer ) buffer->Release();
6095 if ( handle->buffer[1] ) {
6096 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6097 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6098 if ( buffer ) buffer->Release();
6101 CloseHandle( handle->condition );
6103 stream_.apiHandle = 0;
6106 for ( int i=0; i<2; i++ ) {
6107 if ( stream_.userBuffer[i] ) {
6108 free( stream_.userBuffer[i] );
6109 stream_.userBuffer[i] = 0;
6113 if ( stream_.deviceBuffer ) {
6114 free( stream_.deviceBuffer );
6115 stream_.deviceBuffer = 0;
6118 stream_.state = STREAM_CLOSED;
6122 void RtApiDs :: closeStream()
6124 if ( stream_.state == STREAM_CLOSED ) {
6125 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6126 error( RtAudioError::WARNING );
6130 // Stop the callback thread.
6131 stream_.callbackInfo.isRunning = false;
6132 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6133 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6135 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6137 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6138 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6139 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6146 if ( handle->buffer[1] ) {
6147 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6148 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6155 CloseHandle( handle->condition );
6157 stream_.apiHandle = 0;
6160 for ( int i=0; i<2; i++ ) {
6161 if ( stream_.userBuffer[i] ) {
6162 free( stream_.userBuffer[i] );
6163 stream_.userBuffer[i] = 0;
6167 if ( stream_.deviceBuffer ) {
6168 free( stream_.deviceBuffer );
6169 stream_.deviceBuffer = 0;
6172 stream_.mode = UNINITIALIZED;
6173 stream_.state = STREAM_CLOSED;
6176 void RtApiDs :: startStream()
6179 RtApi::startStream();
6180 if ( stream_.state == STREAM_RUNNING ) {
6181 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6182 error( RtAudioError::WARNING );
6186 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6188 // Increase scheduler frequency on lesser windows (a side-effect of
6189 // increasing timer accuracy). On greater windows (Win2K or later),
6190 // this is already in effect.
6191 timeBeginPeriod( 1 );
6193 buffersRolling = false;
6194 duplexPrerollBytes = 0;
6196 if ( stream_.mode == DUPLEX ) {
6197 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6198 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6202 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6204 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6205 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6206 if ( FAILED( result ) ) {
6207 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6208 errorText_ = errorStream_.str();
6213 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6215 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6216 result = buffer->Start( DSCBSTART_LOOPING );
6217 if ( FAILED( result ) ) {
6218 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6219 errorText_ = errorStream_.str();
6224 handle->drainCounter = 0;
6225 handle->internalDrain = false;
6226 ResetEvent( handle->condition );
6227 stream_.state = STREAM_RUNNING;
6230 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6233 void RtApiDs :: stopStream()
6236 RtApi::startStream();
6237 if ( stream_.state == STREAM_STOPPED ) {
6238 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6239 error( RtAudioError::WARNING );
6246 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6247 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6248 if ( handle->drainCounter == 0 ) {
6249 handle->drainCounter = 2;
6250 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6253 stream_.state = STREAM_STOPPED;
6255 MUTEX_LOCK( &stream_.mutex );
6257 // Stop the buffer and clear memory
6258 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6259 result = buffer->Stop();
6260 if ( FAILED( result ) ) {
6261 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6262 errorText_ = errorStream_.str();
6266 // Lock the buffer and clear it so that if we start to play again,
6267 // we won't have old data playing.
6268 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6269 if ( FAILED( result ) ) {
6270 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6271 errorText_ = errorStream_.str();
6275 // Zero the DS buffer
6276 ZeroMemory( audioPtr, dataLen );
6278 // Unlock the DS buffer
6279 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6280 if ( FAILED( result ) ) {
6281 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6282 errorText_ = errorStream_.str();
6286 // If we start playing again, we must begin at beginning of buffer.
6287 handle->bufferPointer[0] = 0;
6290 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6291 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6295 stream_.state = STREAM_STOPPED;
6297 if ( stream_.mode != DUPLEX )
6298 MUTEX_LOCK( &stream_.mutex );
6300 result = buffer->Stop();
6301 if ( FAILED( result ) ) {
6302 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6303 errorText_ = errorStream_.str();
6307 // Lock the buffer and clear it so that if we start to play again,
6308 // we won't have old data playing.
6309 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6310 if ( FAILED( result ) ) {
6311 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6312 errorText_ = errorStream_.str();
6316 // Zero the DS buffer
6317 ZeroMemory( audioPtr, dataLen );
6319 // Unlock the DS buffer
6320 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6321 if ( FAILED( result ) ) {
6322 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6323 errorText_ = errorStream_.str();
6327 // If we start recording again, we must begin at beginning of buffer.
6328 handle->bufferPointer[1] = 0;
6332 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6333 MUTEX_UNLOCK( &stream_.mutex );
6335 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6338 void RtApiDs :: abortStream()
6341 if ( stream_.state == STREAM_STOPPED ) {
6342 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6343 error( RtAudioError::WARNING );
6347 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6348 handle->drainCounter = 2;
6353 void RtApiDs :: callbackEvent()
6355 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6356 Sleep( 50 ); // sleep 50 milliseconds
6360 if ( stream_.state == STREAM_CLOSED ) {
6361 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6362 error( RtAudioError::WARNING );
6366 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6367 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6369 // Check if we were draining the stream and signal is finished.
6370 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6372 stream_.state = STREAM_STOPPING;
6373 if ( handle->internalDrain == false )
6374 SetEvent( handle->condition );
6380 // Invoke user callback to get fresh output data UNLESS we are
6382 if ( handle->drainCounter == 0 ) {
6383 RtAudioCallback callback = (RtAudioCallback) info->callback;
6384 double streamTime = getStreamTime();
6385 RtAudioStreamStatus status = 0;
6386 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6387 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6388 handle->xrun[0] = false;
6390 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6391 status |= RTAUDIO_INPUT_OVERFLOW;
6392 handle->xrun[1] = false;
6394 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6395 stream_.bufferSize, streamTime, status, info->userData );
6396 if ( cbReturnValue == 2 ) {
6397 stream_.state = STREAM_STOPPING;
6398 handle->drainCounter = 2;
6402 else if ( cbReturnValue == 1 ) {
6403 handle->drainCounter = 1;
6404 handle->internalDrain = true;
6409 DWORD currentWritePointer, safeWritePointer;
6410 DWORD currentReadPointer, safeReadPointer;
6411 UINT nextWritePointer;
6413 LPVOID buffer1 = NULL;
6414 LPVOID buffer2 = NULL;
6415 DWORD bufferSize1 = 0;
6416 DWORD bufferSize2 = 0;
6421 MUTEX_LOCK( &stream_.mutex );
6422 if ( stream_.state == STREAM_STOPPED ) {
6423 MUTEX_UNLOCK( &stream_.mutex );
6427 if ( buffersRolling == false ) {
6428 if ( stream_.mode == DUPLEX ) {
6429 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6431 // It takes a while for the devices to get rolling. As a result,
6432 // there's no guarantee that the capture and write device pointers
6433 // will move in lockstep. Wait here for both devices to start
6434 // rolling, and then set our buffer pointers accordingly.
6435 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6436 // bytes later than the write buffer.
6438 // Stub: a serious risk of having a pre-emptive scheduling round
6439 // take place between the two GetCurrentPosition calls... but I'm
6440 // really not sure how to solve the problem. Temporarily boost to
6441 // Realtime priority, maybe; but I'm not sure what priority the
6442 // DirectSound service threads run at. We *should* be roughly
6443 // within a ms or so of correct.
6445 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6446 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6448 DWORD startSafeWritePointer, startSafeReadPointer;
6450 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6451 if ( FAILED( result ) ) {
6452 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6453 errorText_ = errorStream_.str();
6454 MUTEX_UNLOCK( &stream_.mutex );
6455 error( RtAudioError::SYSTEM_ERROR );
6458 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6459 if ( FAILED( result ) ) {
6460 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6461 errorText_ = errorStream_.str();
6462 MUTEX_UNLOCK( &stream_.mutex );
6463 error( RtAudioError::SYSTEM_ERROR );
6467 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6468 if ( FAILED( result ) ) {
6469 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6470 errorText_ = errorStream_.str();
6471 MUTEX_UNLOCK( &stream_.mutex );
6472 error( RtAudioError::SYSTEM_ERROR );
6475 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6476 if ( FAILED( result ) ) {
6477 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6478 errorText_ = errorStream_.str();
6479 MUTEX_UNLOCK( &stream_.mutex );
6480 error( RtAudioError::SYSTEM_ERROR );
6483 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6487 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6489 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6490 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6491 handle->bufferPointer[1] = safeReadPointer;
6493 else if ( stream_.mode == OUTPUT ) {
6495 // Set the proper nextWritePosition after initial startup.
6496 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6497 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6498 if ( FAILED( result ) ) {
6499 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6500 errorText_ = errorStream_.str();
6501 MUTEX_UNLOCK( &stream_.mutex );
6502 error( RtAudioError::SYSTEM_ERROR );
6505 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6506 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6509 buffersRolling = true;
6512 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6514 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6516 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6517 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6518 bufferBytes *= formatBytes( stream_.userFormat );
6519 memset( stream_.userBuffer[0], 0, bufferBytes );
6522 // Setup parameters and do buffer conversion if necessary.
6523 if ( stream_.doConvertBuffer[0] ) {
6524 buffer = stream_.deviceBuffer;
6525 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6526 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6527 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6530 buffer = stream_.userBuffer[0];
6531 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6532 bufferBytes *= formatBytes( stream_.userFormat );
6535 // No byte swapping necessary in DirectSound implementation.
6537 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6538 // unsigned. So, we need to convert our signed 8-bit data here to
6540 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6541 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6543 DWORD dsBufferSize = handle->dsBufferSize[0];
6544 nextWritePointer = handle->bufferPointer[0];
6546 DWORD endWrite, leadPointer;
6548 // Find out where the read and "safe write" pointers are.
6549 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6550 if ( FAILED( result ) ) {
6551 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6552 errorText_ = errorStream_.str();
6553 MUTEX_UNLOCK( &stream_.mutex );
6554 error( RtAudioError::SYSTEM_ERROR );
6558 // We will copy our output buffer into the region between
6559 // safeWritePointer and leadPointer. If leadPointer is not
6560 // beyond the next endWrite position, wait until it is.
6561 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6562 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6563 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6564 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6565 endWrite = nextWritePointer + bufferBytes;
6567 // Check whether the entire write region is behind the play pointer.
6568 if ( leadPointer >= endWrite ) break;
6570 // If we are here, then we must wait until the leadPointer advances
6571 // beyond the end of our next write region. We use the
6572 // Sleep() function to suspend operation until that happens.
6573 double millis = ( endWrite - leadPointer ) * 1000.0;
6574 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6575 if ( millis < 1.0 ) millis = 1.0;
6576 Sleep( (DWORD) millis );
6579 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6580 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6581 // We've strayed into the forbidden zone ... resync the read pointer.
6582 handle->xrun[0] = true;
6583 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6584 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6585 handle->bufferPointer[0] = nextWritePointer;
6586 endWrite = nextWritePointer + bufferBytes;
6589 // Lock free space in the buffer
6590 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6591 &bufferSize1, &buffer2, &bufferSize2, 0 );
6592 if ( FAILED( result ) ) {
6593 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6594 errorText_ = errorStream_.str();
6595 MUTEX_UNLOCK( &stream_.mutex );
6596 error( RtAudioError::SYSTEM_ERROR );
6600 // Copy our buffer into the DS buffer
6601 CopyMemory( buffer1, buffer, bufferSize1 );
6602 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6604 // Update our buffer offset and unlock sound buffer
6605 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6606 if ( FAILED( result ) ) {
6607 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6608 errorText_ = errorStream_.str();
6609 MUTEX_UNLOCK( &stream_.mutex );
6610 error( RtAudioError::SYSTEM_ERROR );
6613 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6614 handle->bufferPointer[0] = nextWritePointer;
6617 // Don't bother draining input
6618 if ( handle->drainCounter ) {
6619 handle->drainCounter++;
6623 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6625 // Setup parameters.
6626 if ( stream_.doConvertBuffer[1] ) {
6627 buffer = stream_.deviceBuffer;
6628 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6629 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6632 buffer = stream_.userBuffer[1];
6633 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6634 bufferBytes *= formatBytes( stream_.userFormat );
6637 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6638 long nextReadPointer = handle->bufferPointer[1];
6639 DWORD dsBufferSize = handle->dsBufferSize[1];
6641 // Find out where the write and "safe read" pointers are.
6642 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6643 if ( FAILED( result ) ) {
6644 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6645 errorText_ = errorStream_.str();
6646 MUTEX_UNLOCK( &stream_.mutex );
6647 error( RtAudioError::SYSTEM_ERROR );
6651 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6652 DWORD endRead = nextReadPointer + bufferBytes;
6654 // Handling depends on whether we are INPUT or DUPLEX.
6655 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6656 // then a wait here will drag the write pointers into the forbidden zone.
6658 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6659 // it's in a safe position. This causes dropouts, but it seems to be the only
6660 // practical way to sync up the read and write pointers reliably, given the
6661 // the very complex relationship between phase and increment of the read and write
6664 // In order to minimize audible dropouts in DUPLEX mode, we will
6665 // provide a pre-roll period of 0.5 seconds in which we return
6666 // zeros from the read buffer while the pointers sync up.
6668 if ( stream_.mode == DUPLEX ) {
6669 if ( safeReadPointer < endRead ) {
6670 if ( duplexPrerollBytes <= 0 ) {
6671 // Pre-roll time over. Be more agressive.
6672 int adjustment = endRead-safeReadPointer;
6674 handle->xrun[1] = true;
6676 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6677 // and perform fine adjustments later.
6678 // - small adjustments: back off by twice as much.
6679 if ( adjustment >= 2*bufferBytes )
6680 nextReadPointer = safeReadPointer-2*bufferBytes;
6682 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6684 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6688 // In pre=roll time. Just do it.
6689 nextReadPointer = safeReadPointer - bufferBytes;
6690 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6692 endRead = nextReadPointer + bufferBytes;
6695 else { // mode == INPUT
6696 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6697 // See comments for playback.
6698 double millis = (endRead - safeReadPointer) * 1000.0;
6699 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6700 if ( millis < 1.0 ) millis = 1.0;
6701 Sleep( (DWORD) millis );
6703 // Wake up and find out where we are now.
6704 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6705 if ( FAILED( result ) ) {
6706 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6707 errorText_ = errorStream_.str();
6708 MUTEX_UNLOCK( &stream_.mutex );
6709 error( RtAudioError::SYSTEM_ERROR );
6713 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6717 // Lock free space in the buffer
6718 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6719 &bufferSize1, &buffer2, &bufferSize2, 0 );
6720 if ( FAILED( result ) ) {
6721 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6722 errorText_ = errorStream_.str();
6723 MUTEX_UNLOCK( &stream_.mutex );
6724 error( RtAudioError::SYSTEM_ERROR );
6728 if ( duplexPrerollBytes <= 0 ) {
6729 // Copy our buffer into the DS buffer
6730 CopyMemory( buffer, buffer1, bufferSize1 );
6731 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6734 memset( buffer, 0, bufferSize1 );
6735 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6736 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6739 // Update our buffer offset and unlock sound buffer
6740 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6741 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6742 if ( FAILED( result ) ) {
6743 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6744 errorText_ = errorStream_.str();
6745 MUTEX_UNLOCK( &stream_.mutex );
6746 error( RtAudioError::SYSTEM_ERROR );
6749 handle->bufferPointer[1] = nextReadPointer;
6751 // No byte swapping necessary in DirectSound implementation.
6753 // If necessary, convert 8-bit data from unsigned to signed.
6754 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6755 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6757 // Do buffer conversion if necessary.
6758 if ( stream_.doConvertBuffer[1] )
6759 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6763 MUTEX_UNLOCK( &stream_.mutex );
6764 RtApi::tickStreamTime();
// Definitions for utility functions and callbacks
// specific to the DirectSound implementation.
6770 static unsigned __stdcall callbackHandler( void *ptr )
6772 CallbackInfo *info = (CallbackInfo *) ptr;
6773 RtApiDs *object = (RtApiDs *) info->object;
6774 bool* isRunning = &info->isRunning;
6776 while ( *isRunning == true ) {
6777 object->callbackEvent();
6784 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6785 LPCTSTR description,
6789 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6790 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6793 bool validDevice = false;
6794 if ( probeInfo.isInput == true ) {
6796 LPDIRECTSOUNDCAPTURE object;
6798 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6799 if ( hr != DS_OK ) return TRUE;
6801 caps.dwSize = sizeof(caps);
6802 hr = object->GetCaps( &caps );
6803 if ( hr == DS_OK ) {
6804 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6811 LPDIRECTSOUND object;
6812 hr = DirectSoundCreate( lpguid, &object, NULL );
6813 if ( hr != DS_OK ) return TRUE;
6815 caps.dwSize = sizeof(caps);
6816 hr = object->GetCaps( &caps );
6817 if ( hr == DS_OK ) {
6818 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6824 // If good device, then save its name and guid.
6825 std::string name = convertCharPointerToStdString( description );
6826 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6827 if ( lpguid == NULL )
6828 name = "Default Device";
6829 if ( validDevice ) {
6830 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6831 if ( dsDevices[i].name == name ) {
6832 dsDevices[i].found = true;
6833 if ( probeInfo.isInput ) {
6834 dsDevices[i].id[1] = lpguid;
6835 dsDevices[i].validId[1] = true;
6838 dsDevices[i].id[0] = lpguid;
6839 dsDevices[i].validId[0] = true;
6847 device.found = true;
6848 if ( probeInfo.isInput ) {
6849 device.id[1] = lpguid;
6850 device.validId[1] = true;
6853 device.id[0] = lpguid;
6854 device.validId[0] = true;
6856 dsDevices.push_back( device );
6862 static const char* getErrorString( int code )
6866 case DSERR_ALLOCATED:
6867 return "Already allocated";
6869 case DSERR_CONTROLUNAVAIL:
6870 return "Control unavailable";
6872 case DSERR_INVALIDPARAM:
6873 return "Invalid parameter";
6875 case DSERR_INVALIDCALL:
6876 return "Invalid call";
6879 return "Generic error";
6881 case DSERR_PRIOLEVELNEEDED:
6882 return "Priority level needed";
6884 case DSERR_OUTOFMEMORY:
6885 return "Out of memory";
6887 case DSERR_BADFORMAT:
6888 return "The sample rate or the channel format is not supported";
6890 case DSERR_UNSUPPORTED:
6891 return "Not supported";
6893 case DSERR_NODRIVER:
6896 case DSERR_ALREADYINITIALIZED:
6897 return "Already initialized";
6899 case DSERR_NOAGGREGATION:
6900 return "No aggregation";
6902 case DSERR_BUFFERLOST:
6903 return "Buffer lost";
6905 case DSERR_OTHERAPPHASPRIO:
6906 return "Another application already has priority";
6908 case DSERR_UNINITIALIZED:
6909 return "Uninitialized";
6912 return "DirectSound unknown error";
//******************** End of __WINDOWS_DS__ *********************//
6919 #if defined(__LINUX_ALSA__)
6921 #include <alsa/asoundlib.h>
6924 // A structure to hold various information related to the ALSA API
6927 snd_pcm_t *handles[2];
6930 pthread_cond_t runnable_cv;
6934 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6937 static void *alsaCallbackHandler( void * ptr );
6939 RtApiAlsa :: RtApiAlsa()
6941 // Nothing to do here.
6944 RtApiAlsa :: ~RtApiAlsa()
6946 if ( stream_.state != STREAM_CLOSED ) closeStream();
6949 unsigned int RtApiAlsa :: getDeviceCount( void )
6951 unsigned nDevices = 0;
6952 int result, subdevice, card;
6956 // Count cards and devices
6958 snd_card_next( &card );
6959 while ( card >= 0 ) {
6960 sprintf( name, "hw:%d", card );
6961 result = snd_ctl_open( &handle, name, 0 );
6963 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6964 errorText_ = errorStream_.str();
6965 error( RtAudioError::WARNING );
6970 result = snd_ctl_pcm_next_device( handle, &subdevice );
6972 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6973 errorText_ = errorStream_.str();
6974 error( RtAudioError::WARNING );
6977 if ( subdevice < 0 )
6982 snd_ctl_close( handle );
6983 snd_card_next( &card );
6986 result = snd_ctl_open( &handle, "default", 0 );
6989 snd_ctl_close( handle );
6995 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6997 RtAudio::DeviceInfo info;
6998 info.probed = false;
7000 unsigned nDevices = 0;
7001 int result, subdevice, card;
7005 // Count cards and devices
7008 snd_card_next( &card );
7009 while ( card >= 0 ) {
7010 sprintf( name, "hw:%d", card );
7011 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7013 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7014 errorText_ = errorStream_.str();
7015 error( RtAudioError::WARNING );
7020 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7022 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7023 errorText_ = errorStream_.str();
7024 error( RtAudioError::WARNING );
7027 if ( subdevice < 0 ) break;
7028 if ( nDevices == device ) {
7029 sprintf( name, "hw:%d,%d", card, subdevice );
7035 snd_ctl_close( chandle );
7036 snd_card_next( &card );
7039 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7040 if ( result == 0 ) {
7041 if ( nDevices == device ) {
7042 strcpy( name, "default" );
7048 if ( nDevices == 0 ) {
7049 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7050 error( RtAudioError::INVALID_USE );
7054 if ( device >= nDevices ) {
7055 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7056 error( RtAudioError::INVALID_USE );
7062 // If a stream is already open, we cannot probe the stream devices.
7063 // Thus, use the saved results.
7064 if ( stream_.state != STREAM_CLOSED &&
7065 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7066 snd_ctl_close( chandle );
7067 if ( device >= devices_.size() ) {
7068 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7069 error( RtAudioError::WARNING );
7072 return devices_[ device ];
7075 int openMode = SND_PCM_ASYNC;
7076 snd_pcm_stream_t stream;
7077 snd_pcm_info_t *pcminfo;
7078 snd_pcm_info_alloca( &pcminfo );
7080 snd_pcm_hw_params_t *params;
7081 snd_pcm_hw_params_alloca( ¶ms );
7083 // First try for playback unless default device (which has subdev -1)
7084 stream = SND_PCM_STREAM_PLAYBACK;
7085 snd_pcm_info_set_stream( pcminfo, stream );
7086 if ( subdevice != -1 ) {
7087 snd_pcm_info_set_device( pcminfo, subdevice );
7088 snd_pcm_info_set_subdevice( pcminfo, 0 );
7090 result = snd_ctl_pcm_info( chandle, pcminfo );
7092 // Device probably doesn't support playback.
7097 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7099 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7100 errorText_ = errorStream_.str();
7101 error( RtAudioError::WARNING );
7105 // The device is open ... fill the parameter structure.
7106 result = snd_pcm_hw_params_any( phandle, params );
7108 snd_pcm_close( phandle );
7109 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7110 errorText_ = errorStream_.str();
7111 error( RtAudioError::WARNING );
7115 // Get output channel information.
7117 result = snd_pcm_hw_params_get_channels_max( params, &value );
7119 snd_pcm_close( phandle );
7120 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7121 errorText_ = errorStream_.str();
7122 error( RtAudioError::WARNING );
7125 info.outputChannels = value;
7126 snd_pcm_close( phandle );
7129 stream = SND_PCM_STREAM_CAPTURE;
7130 snd_pcm_info_set_stream( pcminfo, stream );
7132 // Now try for capture unless default device (with subdev = -1)
7133 if ( subdevice != -1 ) {
7134 result = snd_ctl_pcm_info( chandle, pcminfo );
7135 snd_ctl_close( chandle );
7137 // Device probably doesn't support capture.
7138 if ( info.outputChannels == 0 ) return info;
7139 goto probeParameters;
7143 snd_ctl_close( chandle );
7145 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7147 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7148 errorText_ = errorStream_.str();
7149 error( RtAudioError::WARNING );
7150 if ( info.outputChannels == 0 ) return info;
7151 goto probeParameters;
7154 // The device is open ... fill the parameter structure.
7155 result = snd_pcm_hw_params_any( phandle, params );
7157 snd_pcm_close( phandle );
7158 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7159 errorText_ = errorStream_.str();
7160 error( RtAudioError::WARNING );
7161 if ( info.outputChannels == 0 ) return info;
7162 goto probeParameters;
7165 result = snd_pcm_hw_params_get_channels_max( params, &value );
7167 snd_pcm_close( phandle );
7168 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7169 errorText_ = errorStream_.str();
7170 error( RtAudioError::WARNING );
7171 if ( info.outputChannels == 0 ) return info;
7172 goto probeParameters;
7174 info.inputChannels = value;
7175 snd_pcm_close( phandle );
7177 // If device opens for both playback and capture, we determine the channels.
7178 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7179 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7181 // ALSA doesn't provide default devices so we'll use the first available one.
7182 if ( device == 0 && info.outputChannels > 0 )
7183 info.isDefaultOutput = true;
7184 if ( device == 0 && info.inputChannels > 0 )
7185 info.isDefaultInput = true;
7188 // At this point, we just need to figure out the supported data
7189 // formats and sample rates. We'll proceed by opening the device in
7190 // the direction with the maximum number of channels, or playback if
7191 // they are equal. This might limit our sample rate options, but so
7194 if ( info.outputChannels >= info.inputChannels )
7195 stream = SND_PCM_STREAM_PLAYBACK;
7197 stream = SND_PCM_STREAM_CAPTURE;
7198 snd_pcm_info_set_stream( pcminfo, stream );
7200 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7202 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7203 errorText_ = errorStream_.str();
7204 error( RtAudioError::WARNING );
7208 // The device is open ... fill the parameter structure.
7209 result = snd_pcm_hw_params_any( phandle, params );
7211 snd_pcm_close( phandle );
7212 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7213 errorText_ = errorStream_.str();
7214 error( RtAudioError::WARNING );
7218 // Test our discrete set of sample rate values.
7219 info.sampleRates.clear();
7220 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7221 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7222 info.sampleRates.push_back( SAMPLE_RATES[i] );
7224 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7225 info.preferredSampleRate = SAMPLE_RATES[i];
7228 if ( info.sampleRates.size() == 0 ) {
7229 snd_pcm_close( phandle );
7230 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7231 errorText_ = errorStream_.str();
7232 error( RtAudioError::WARNING );
7236 // Probe the supported data formats ... we don't care about endian-ness just yet
7237 snd_pcm_format_t format;
7238 info.nativeFormats = 0;
7239 format = SND_PCM_FORMAT_S8;
7240 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7241 info.nativeFormats |= RTAUDIO_SINT8;
7242 format = SND_PCM_FORMAT_S16;
7243 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7244 info.nativeFormats |= RTAUDIO_SINT16;
7245 format = SND_PCM_FORMAT_S24;
7246 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7247 info.nativeFormats |= RTAUDIO_SINT24;
7248 format = SND_PCM_FORMAT_S32;
7249 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7250 info.nativeFormats |= RTAUDIO_SINT32;
7251 format = SND_PCM_FORMAT_FLOAT;
7252 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7253 info.nativeFormats |= RTAUDIO_FLOAT32;
7254 format = SND_PCM_FORMAT_FLOAT64;
7255 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7256 info.nativeFormats |= RTAUDIO_FLOAT64;
7258 // Check that we have at least one supported format
7259 if ( info.nativeFormats == 0 ) {
7260 snd_pcm_close( phandle );
7261 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7262 errorText_ = errorStream_.str();
7263 error( RtAudioError::WARNING );
7267 // Get the device name
7269 result = snd_card_get_name( card, &cardname );
7270 if ( result >= 0 ) {
7271 sprintf( name, "hw:%s,%d", cardname, subdevice );
7276 // That's all ... close the device and return
7277 snd_pcm_close( phandle );
7282 void RtApiAlsa :: saveDeviceInfo( void )
7286 unsigned int nDevices = getDeviceCount();
7287 devices_.resize( nDevices );
7288 for ( unsigned int i=0; i<nDevices; i++ )
7289 devices_[i] = getDeviceInfo( i );
7292 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7293 unsigned int firstChannel, unsigned int sampleRate,
7294 RtAudioFormat format, unsigned int *bufferSize,
7295 RtAudio::StreamOptions *options )
7298 #if defined(__RTAUDIO_DEBUG__)
7300 snd_output_stdio_attach(&out, stderr, 0);
7303 // I'm not using the "plug" interface ... too much inconsistent behavior.
7305 unsigned nDevices = 0;
7306 int result, subdevice, card;
7310 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7311 snprintf(name, sizeof(name), "%s", "default");
7313 // Count cards and devices
7315 snd_card_next( &card );
7316 while ( card >= 0 ) {
7317 sprintf( name, "hw:%d", card );
7318 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7320 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7321 errorText_ = errorStream_.str();
7326 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7327 if ( result < 0 ) break;
7328 if ( subdevice < 0 ) break;
7329 if ( nDevices == device ) {
7330 sprintf( name, "hw:%d,%d", card, subdevice );
7331 snd_ctl_close( chandle );
7336 snd_ctl_close( chandle );
7337 snd_card_next( &card );
7340 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7341 if ( result == 0 ) {
7342 if ( nDevices == device ) {
7343 strcpy( name, "default" );
7349 if ( nDevices == 0 ) {
7350 // This should not happen because a check is made before this function is called.
7351 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7355 if ( device >= nDevices ) {
7356 // This should not happen because a check is made before this function is called.
7357 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7364 // The getDeviceInfo() function will not work for a device that is
7365 // already open. Thus, we'll probe the system before opening a
7366 // stream and save the results for use by getDeviceInfo().
7367 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7368 this->saveDeviceInfo();
7370 snd_pcm_stream_t stream;
7371 if ( mode == OUTPUT )
7372 stream = SND_PCM_STREAM_PLAYBACK;
7374 stream = SND_PCM_STREAM_CAPTURE;
7377 int openMode = SND_PCM_ASYNC;
7378 result = snd_pcm_open( &phandle, name, stream, openMode );
7380 if ( mode == OUTPUT )
7381 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7383 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7384 errorText_ = errorStream_.str();
7388 // Fill the parameter structure.
7389 snd_pcm_hw_params_t *hw_params;
7390 snd_pcm_hw_params_alloca( &hw_params );
7391 result = snd_pcm_hw_params_any( phandle, hw_params );
7393 snd_pcm_close( phandle );
7394 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7395 errorText_ = errorStream_.str();
7399 #if defined(__RTAUDIO_DEBUG__)
7400 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7401 snd_pcm_hw_params_dump( hw_params, out );
7404 // Set access ... check user preference.
7405 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7406 stream_.userInterleaved = false;
7407 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7409 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7410 stream_.deviceInterleaved[mode] = true;
7413 stream_.deviceInterleaved[mode] = false;
7416 stream_.userInterleaved = true;
7417 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7419 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7420 stream_.deviceInterleaved[mode] = false;
7423 stream_.deviceInterleaved[mode] = true;
7427 snd_pcm_close( phandle );
7428 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7429 errorText_ = errorStream_.str();
7433 // Determine how to set the device format.
7434 stream_.userFormat = format;
7435 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7437 if ( format == RTAUDIO_SINT8 )
7438 deviceFormat = SND_PCM_FORMAT_S8;
7439 else if ( format == RTAUDIO_SINT16 )
7440 deviceFormat = SND_PCM_FORMAT_S16;
7441 else if ( format == RTAUDIO_SINT24 )
7442 deviceFormat = SND_PCM_FORMAT_S24;
7443 else if ( format == RTAUDIO_SINT32 )
7444 deviceFormat = SND_PCM_FORMAT_S32;
7445 else if ( format == RTAUDIO_FLOAT32 )
7446 deviceFormat = SND_PCM_FORMAT_FLOAT;
7447 else if ( format == RTAUDIO_FLOAT64 )
7448 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7450 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7451 stream_.deviceFormat[mode] = format;
7455 // The user requested format is not natively supported by the device.
7456 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7457 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7458 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7462 deviceFormat = SND_PCM_FORMAT_FLOAT;
7463 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7464 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7468 deviceFormat = SND_PCM_FORMAT_S32;
7469 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7470 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7474 deviceFormat = SND_PCM_FORMAT_S24;
7475 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7476 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7480 deviceFormat = SND_PCM_FORMAT_S16;
7481 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7482 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7486 deviceFormat = SND_PCM_FORMAT_S8;
7487 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7488 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7492 // If we get here, no supported format was found.
7493 snd_pcm_close( phandle );
7494 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7495 errorText_ = errorStream_.str();
7499 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7501 snd_pcm_close( phandle );
7502 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7503 errorText_ = errorStream_.str();
7507 // Determine whether byte-swaping is necessary.
7508 stream_.doByteSwap[mode] = false;
7509 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7510 result = snd_pcm_format_cpu_endian( deviceFormat );
7512 stream_.doByteSwap[mode] = true;
7513 else if (result < 0) {
7514 snd_pcm_close( phandle );
7515 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7516 errorText_ = errorStream_.str();
7521 // Set the sample rate.
7522 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7524 snd_pcm_close( phandle );
7525 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7526 errorText_ = errorStream_.str();
7530 // Determine the number of channels for this device. We support a possible
7531 // minimum device channel number > than the value requested by the user.
7532 stream_.nUserChannels[mode] = channels;
7534 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7535 unsigned int deviceChannels = value;
7536 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7537 snd_pcm_close( phandle );
7538 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7539 errorText_ = errorStream_.str();
7543 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7545 snd_pcm_close( phandle );
7546 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7547 errorText_ = errorStream_.str();
7550 deviceChannels = value;
7551 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7552 stream_.nDeviceChannels[mode] = deviceChannels;
7554 // Set the device channels.
7555 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7557 snd_pcm_close( phandle );
7558 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7559 errorText_ = errorStream_.str();
7563 // Set the buffer (or period) size.
7565 snd_pcm_uframes_t periodSize = *bufferSize;
7566 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7568 snd_pcm_close( phandle );
7569 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7570 errorText_ = errorStream_.str();
7573 *bufferSize = periodSize;
7575 // Set the buffer number, which in ALSA is referred to as the "period".
7576 unsigned int periods = 0;
7577 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7578 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7579 if ( periods < 2 ) periods = 4; // a fairly safe default value
7580 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7582 snd_pcm_close( phandle );
7583 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7584 errorText_ = errorStream_.str();
7588 // If attempting to setup a duplex stream, the bufferSize parameter
7589 // MUST be the same in both directions!
7590 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7591 snd_pcm_close( phandle );
7592 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7593 errorText_ = errorStream_.str();
7597 stream_.bufferSize = *bufferSize;
7599 // Install the hardware configuration
7600 result = snd_pcm_hw_params( phandle, hw_params );
7602 snd_pcm_close( phandle );
7603 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7604 errorText_ = errorStream_.str();
7608 #if defined(__RTAUDIO_DEBUG__)
7609 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7610 snd_pcm_hw_params_dump( hw_params, out );
7613 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7614 snd_pcm_sw_params_t *sw_params = NULL;
7615 snd_pcm_sw_params_alloca( &sw_params );
7616 snd_pcm_sw_params_current( phandle, sw_params );
7617 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7618 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7619 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7621 // The following two settings were suggested by Theo Veenker
7622 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7623 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7625 // here are two options for a fix
7626 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7627 snd_pcm_uframes_t val;
7628 snd_pcm_sw_params_get_boundary( sw_params, &val );
7629 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7631 result = snd_pcm_sw_params( phandle, sw_params );
7633 snd_pcm_close( phandle );
7634 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7635 errorText_ = errorStream_.str();
7639 #if defined(__RTAUDIO_DEBUG__)
7640 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7641 snd_pcm_sw_params_dump( sw_params, out );
7644 // Set flags for buffer conversion
7645 stream_.doConvertBuffer[mode] = false;
7646 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7647 stream_.doConvertBuffer[mode] = true;
7648 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7649 stream_.doConvertBuffer[mode] = true;
7650 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7651 stream_.nUserChannels[mode] > 1 )
7652 stream_.doConvertBuffer[mode] = true;
7654 // Allocate the ApiHandle if necessary and then save.
7655 AlsaHandle *apiInfo = 0;
7656 if ( stream_.apiHandle == 0 ) {
7658 apiInfo = (AlsaHandle *) new AlsaHandle;
7660 catch ( std::bad_alloc& ) {
7661 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7665 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7666 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7670 stream_.apiHandle = (void *) apiInfo;
7671 apiInfo->handles[0] = 0;
7672 apiInfo->handles[1] = 0;
7675 apiInfo = (AlsaHandle *) stream_.apiHandle;
7677 apiInfo->handles[mode] = phandle;
7680 // Allocate necessary internal buffers.
7681 unsigned long bufferBytes;
7682 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7683 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7684 if ( stream_.userBuffer[mode] == NULL ) {
7685 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7689 if ( stream_.doConvertBuffer[mode] ) {
7691 bool makeBuffer = true;
7692 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7693 if ( mode == INPUT ) {
7694 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7695 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7696 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7701 bufferBytes *= *bufferSize;
7702 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7703 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7704 if ( stream_.deviceBuffer == NULL ) {
7705 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7711 stream_.sampleRate = sampleRate;
7712 stream_.nBuffers = periods;
7713 stream_.device[mode] = device;
7714 stream_.state = STREAM_STOPPED;
7716 // Setup the buffer conversion information structure.
7717 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7719 // Setup thread if necessary.
7720 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7721 // We had already set up an output stream.
7722 stream_.mode = DUPLEX;
7723 // Link the streams if possible.
7724 apiInfo->synchronized = false;
7725 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7726 apiInfo->synchronized = true;
7728 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7729 error( RtAudioError::WARNING );
7733 stream_.mode = mode;
7735 // Setup callback thread.
7736 stream_.callbackInfo.object = (void *) this;
7738 // Set the thread attributes for joinable and realtime scheduling
7739 // priority (optional). The higher priority will only take affect
7740 // if the program is run as root or suid. Note, under Linux
7741 // processes with CAP_SYS_NICE privilege, a user can change
7742 // scheduling policy and priority (thus need not be root). See
7743 // POSIX "capabilities".
7744 pthread_attr_t attr;
7745 pthread_attr_init( &attr );
7746 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7748 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7749 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7750 // We previously attempted to increase the audio callback priority
7751 // to SCHED_RR here via the attributes. However, while no errors
7752 // were reported in doing so, it did not work. So, now this is
7753 // done in the alsaCallbackHandler function.
7754 stream_.callbackInfo.doRealtime = true;
7755 int priority = options->priority;
7756 int min = sched_get_priority_min( SCHED_RR );
7757 int max = sched_get_priority_max( SCHED_RR );
7758 if ( priority < min ) priority = min;
7759 else if ( priority > max ) priority = max;
7760 stream_.callbackInfo.priority = priority;
7764 stream_.callbackInfo.isRunning = true;
7765 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7766 pthread_attr_destroy( &attr );
7768 stream_.callbackInfo.isRunning = false;
7769 errorText_ = "RtApiAlsa::error creating callback thread!";
7778 pthread_cond_destroy( &apiInfo->runnable_cv );
7779 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7780 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7782 stream_.apiHandle = 0;
7785 if ( phandle) snd_pcm_close( phandle );
7787 for ( int i=0; i<2; i++ ) {
7788 if ( stream_.userBuffer[i] ) {
7789 free( stream_.userBuffer[i] );
7790 stream_.userBuffer[i] = 0;
7794 if ( stream_.deviceBuffer ) {
7795 free( stream_.deviceBuffer );
7796 stream_.deviceBuffer = 0;
7799 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: wake and join the callback thread, drop any
// in-flight audio, close both PCM handles, and free all stream buffers.
// Safe to call on a stopped or running stream; warns if no stream is open.
7803 void RtApiAlsa :: closeStream()
7805 if ( stream_.state == STREAM_CLOSED ) {
7806 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7807 error( RtAudioError::WARNING );
7811 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback loop to exit, then wake it in case it is blocked
// on the runnable condition variable (stream stopped).
7812 stream_.callbackInfo.isRunning = false;
7813 MUTEX_LOCK( &stream_.mutex );
7814 if ( stream_.state == STREAM_STOPPED ) {
7815 apiInfo->runnable = true;
7816 pthread_cond_signal( &apiInfo->runnable_cv );
7818 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing anything down.
7819 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, stop playback/capture immediately (drop, not drain).
7821 if ( stream_.state == STREAM_RUNNING ) {
7822 stream_.state = STREAM_STOPPED;
7823 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7824 snd_pcm_drop( apiInfo->handles[0] );
7825 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7826 snd_pcm_drop( apiInfo->handles[1] );
// Release the per-API handle: condition variable and PCM devices
// (handles[0] = playback, handles[1] = capture).
7830 pthread_cond_destroy( &apiInfo->runnable_cv );
7831 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7832 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7834 stream_.apiHandle = 0;
// Free the user-side buffers for both directions (index 0 = output,
// index 1 = input) and the shared conversion buffer.
7837 for ( int i=0; i<2; i++ ) {
7838 if ( stream_.userBuffer[i] ) {
7839 free( stream_.userBuffer[i] );
7840 stream_.userBuffer[i] = 0;
7844 if ( stream_.deviceBuffer ) {
7845 free( stream_.deviceBuffer );
7846 stream_.deviceBuffer = 0;
// Reset the stream bookkeeping so a new stream can be opened.
7849 stream_.mode = UNINITIALIZED;
7850 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM device(s) if needed and
// wake the callback thread via the runnable condition variable.
7853 void RtApiAlsa :: startStream()
7855 // This method calls snd_pcm_prepare if the device isn't already in that state.
7858 RtApi::startStream();
7859 if ( stream_.state == STREAM_RUNNING ) {
7860 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7861 error( RtAudioError::WARNING );
7865 MUTEX_LOCK( &stream_.mutex );
7868 snd_pcm_state_t state;
7869 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7870 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback device (handle[0]) unless it is already prepared.
7871 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7872 state = snd_pcm_state( handle[0] );
7873 if ( state != SND_PCM_STATE_PREPARED ) {
7874 result = snd_pcm_prepare( handle[0] );
7876 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7877 errorText_ = errorStream_.str();
// Prepare the capture device (handle[1]) only when it is not linked to
// the playback device (unsynchronized duplex or pure input).
7883 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7884 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7885 state = snd_pcm_state( handle[1] );
7886 if ( state != SND_PCM_STATE_PREPARED ) {
7887 result = snd_pcm_prepare( handle[1] );
7889 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7890 errorText_ = errorStream_.str();
7896 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
7899 apiInfo->runnable = true;
7900 pthread_cond_signal( &apiInfo->runnable_cv );
7901 MUTEX_UNLOCK( &stream_.mutex );
// Any negative ALSA result above is reported as a system error here.
7903 if ( result >= 0 ) return;
7904 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain (or drop, when the devices
// are linked) pending output, drop pending input, and park the callback.
7907 void RtApiAlsa :: stopStream()
7910 if ( stream_.state == STREAM_STOPPED ) {
7911 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7912 error( RtAudioError::WARNING );
// Mark stopped before taking the lock so the callback loop sees it.
7916 stream_.state = STREAM_STOPPED;
7917 MUTEX_LOCK( &stream_.mutex );
7920 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7921 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7922 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) devices must use drop; drain would deadlock
// against the linked capture side — presumably why this branch exists.
7923 if ( apiInfo->synchronized )
7924 result = snd_pcm_drop( handle[0] );
7926 result = snd_pcm_drain( handle[0] );
7928 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7929 errorText_ = errorStream_.str();
// Unsynchronized input is simply dropped; no point draining capture data.
7934 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7935 result = snd_pcm_drop( handle[1] );
7937 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7938 errorText_ = errorStream_.str();
7944 apiInfo->runnable = false; // fixes high CPU usage when stopped
7945 MUTEX_UNLOCK( &stream_.mutex );
7947 if ( result >= 0 ) return;
7948 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream immediately: unlike stopStream(), pending
// output is dropped rather than drained, so playback cuts off at once.
7951 void RtApiAlsa :: abortStream()
7954 if ( stream_.state == STREAM_STOPPED ) {
7955 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7956 error( RtAudioError::WARNING );
7960 stream_.state = STREAM_STOPPED;
7961 MUTEX_LOCK( &stream_.mutex );
7964 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7965 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Drop playback unconditionally; drop capture only when not linked.
7966 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7967 result = snd_pcm_drop( handle[0] );
7969 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7970 errorText_ = errorStream_.str();
7975 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7976 result = snd_pcm_drop( handle[1] );
7978 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7979 errorText_ = errorStream_.str();
7985 apiInfo->runnable = false; // fixes high CPU usage when stopped
7986 MUTEX_UNLOCK( &stream_.mutex );
7988 if ( result >= 0 ) return;
7989 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop (run repeatedly by
// alsaCallbackHandler): wait while stopped, invoke the user callback,
// then perform the actual PCM read and/or write for this buffer period.
7992 void RtApiAlsa :: callbackEvent()
7994 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While the stream is stopped, block on the runnable condition variable
// instead of spinning; startStream()/closeStream() signal it.
7995 if ( stream_.state == STREAM_STOPPED ) {
7996 MUTEX_LOCK( &stream_.mutex );
7997 while ( !apiInfo->runnable )
7998 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8000 if ( stream_.state != STREAM_RUNNING ) {
8001 MUTEX_UNLOCK( &stream_.mutex );
8004 MUTEX_UNLOCK( &stream_.mutex );
8007 if ( stream_.state == STREAM_CLOSED ) {
8008 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8009 error( RtAudioError::WARNING );
// Invoke the user callback, reporting any under/overflow flagged by the
// ALSA error handlers since the last period (xrun[0]=output, xrun[1]=input).
8013 int doStopStream = 0;
8014 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8015 double streamTime = getStreamTime();
8016 RtAudioStreamStatus status = 0;
8017 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8018 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8019 apiInfo->xrun[0] = false;
8021 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8022 status |= RTAUDIO_INPUT_OVERFLOW;
8023 apiInfo->xrun[1] = false;
8025 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8026 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return of 2 requests an immediate abort; 1 (handled at the
// end) requests a drain-stop after this period.
8028 if ( doStopStream == 2 ) {
8033 MUTEX_LOCK( &stream_.mutex );
8035 // The state might change while waiting on a mutex.
8036 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8042 snd_pcm_sframes_t frames;
8043 RtAudioFormat format;
8044 handle = (snd_pcm_t **) apiInfo->handles;
// ---------------- Input side (capture) ----------------
8046 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8048 // Setup parameters.
// When conversion is needed, read into the device buffer in the device's
// native channel count/format; otherwise read straight into user space.
8049 if ( stream_.doConvertBuffer[1] ) {
8050 buffer = stream_.deviceBuffer;
8051 channels = stream_.nDeviceChannels[1];
8052 format = stream_.deviceFormat[1];
8055 buffer = stream_.userBuffer[1];
8056 channels = stream_.nUserChannels[1];
8057 format = stream_.userFormat;
8060 // Read samples from device in interleaved/non-interleaved format.
8061 if ( stream_.deviceInterleaved[1] )
8062 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the planar buffer.
8064 void *bufs[channels];
8065 size_t offset = stream_.bufferSize * formatBytes( format );
8066 for ( int i=0; i<channels; i++ )
8067 bufs[i] = (void *) (buffer + (i * offset));
8068 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8071 if ( result < (int) stream_.bufferSize ) {
8072 // Either an error or overrun occurred.
// -EPIPE signals an xrun: flag it for the next callback and re-prepare
// the device so capture can continue.
8073 if ( result == -EPIPE ) {
8074 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8075 if ( state == SND_PCM_STATE_XRUN ) {
8076 apiInfo->xrun[1] = true;
8077 result = snd_pcm_prepare( handle[1] );
8079 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8080 errorText_ = errorStream_.str();
8084 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8085 errorText_ = errorStream_.str();
8089 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8090 errorText_ = errorStream_.str();
8092 error( RtAudioError::WARNING );
8096 // Do byte swapping if necessary.
8097 if ( stream_.doByteSwap[1] )
8098 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8100 // Do buffer conversion if necessary.
8101 if ( stream_.doConvertBuffer[1] )
8102 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8104 // Check stream latency
8105 result = snd_pcm_delay( handle[1], &frames );
8106 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---------------- Output side (playback) ----------------
8111 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8113 // Setup parameters and do buffer conversion if necessary.
8114 if ( stream_.doConvertBuffer[0] ) {
8115 buffer = stream_.deviceBuffer;
8116 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8117 channels = stream_.nDeviceChannels[0];
8118 format = stream_.deviceFormat[0];
8121 buffer = stream_.userBuffer[0];
8122 channels = stream_.nUserChannels[0];
8123 format = stream_.userFormat;
8126 // Do byte swapping if necessary.
8127 if ( stream_.doByteSwap[0] )
8128 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8130 // Write samples to device in interleaved/non-interleaved format.
8131 if ( stream_.deviceInterleaved[0] )
8132 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, as on the read path.
8134 void *bufs[channels];
8135 size_t offset = stream_.bufferSize * formatBytes( format );
8136 for ( int i=0; i<channels; i++ )
8137 bufs[i] = (void *) (buffer + (i * offset));
8138 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8141 if ( result < (int) stream_.bufferSize ) {
8142 // Either an error or underrun occurred.
8143 if ( result == -EPIPE ) {
8144 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8145 if ( state == SND_PCM_STATE_XRUN ) {
8146 apiInfo->xrun[0] = true;
8147 result = snd_pcm_prepare( handle[0] );
8149 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8150 errorText_ = errorStream_.str();
8153 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8156 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8157 errorText_ = errorStream_.str();
8161 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8162 errorText_ = errorStream_.str();
8164 error( RtAudioError::WARNING );
8168 // Check stream latency
8169 result = snd_pcm_delay( handle[0], &frames );
8170 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8174 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock by one buffer period, then honor a deferred
// stop request from the user callback.
8176 RtApi::tickStreamTime();
8177 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread. Optionally raises the
// thread to SCHED_RR priority (requested in probeDeviceOpen), then loops
// calling callbackEvent() until isRunning is cleared by closeStream().
8180 static void *alsaCallbackHandler( void *ptr )
8182 CallbackInfo *info = (CallbackInfo *) ptr;
8183 RtApiAlsa *object = (RtApiAlsa *) info->object;
8184 bool *isRunning = &info->isRunning;
8186 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8187 if ( info->doRealtime ) {
8188 pthread_t tID = pthread_self(); // ID of this thread
8189 sched_param prio = { info->priority }; // scheduling priority of thread
// Best-effort: may fail without CAP_SYS_NICE/root; return value ignored.
8190 pthread_setschedparam( tID, SCHED_RR, &prio );
// Main loop: testcancel() provides a cancellation point each iteration;
// callbackEvent() itself blocks while the stream is stopped.
8194 while ( *isRunning == true ) {
8195 pthread_testcancel();
8196 object->callbackEvent();
8199 pthread_exit( NULL );
8202 //******************** End of __LINUX_ALSA__ *********************//
8205 #if defined(__LINUX_PULSE__)
8207 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8208 // and Tristan Matthews.
8210 #include <pulse/error.h>
8211 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; the list is
// zero-terminated so callers can iterate with `for (...; *sr; ++sr)`.
8214 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8215 44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent.
8217 struct rtaudio_pa_format_mapping_t {
8218 RtAudioFormat rtaudio_format;
8219 pa_sample_format_t pa_format;
// Native formats supported without conversion; terminated by the
// {0, PA_SAMPLE_INVALID} sentinel entry.
8222 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8223 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8224 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8225 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8226 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the simple-API playback
// (s_play) and record (s_rec) connections plus the runnable flag /
// condition variable used to park the callback thread while stopped.
8228 struct PulseAudioHandle {
8232 pthread_cond_t runnable_cv;
8234 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure the stream is closed so the callback thread and
// PulseAudio connections are torn down before the object goes away.
8237 RtApiPulse::~RtApiPulse()
8239 if ( stream_.state != STREAM_CLOSED )
// The PulseAudio backend exposes a single virtual device (the server),
// consistent with getDeviceInfo() below which ignores its device index.
8243 unsigned int RtApiPulse::getDeviceCount( void )
// Report a fixed description of the single PulseAudio device: stereo in
// and out, default for both directions, with the static sample-rate list.
// The device index is intentionally unused (only one device exists).
8248 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8250 RtAudio::DeviceInfo info;
8252 info.name = "PulseAudio";
8253 info.outputChannels = 2;
8254 info.inputChannels = 2;
8255 info.duplexChannels = 2;
8256 info.isDefaultOutput = true;
8257 info.isDefaultInput = true;
// Copy the zero-terminated static rate table into the info structure.
8259 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8260 info.sampleRates.push_back( *sr );
8262 info.preferredSampleRate = 48000;
8263 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loop invoking
// callbackEvent() until isRunning is cleared (closeStream), with a
// cancellation point each iteration.
8268 static void *pulseaudio_callback( void * user )
8270 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8271 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
// volatile: flag is written from another thread (closeStream).
8272 volatile bool *isRunning = &cbi->isRunning;
8274 while ( *isRunning ) {
8275 pthread_testcancel();
8276 context->callbackEvent();
8279 pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread, flush
// and free both simple-API connections, and release the user buffers.
8282 void RtApiPulse::closeStream( void )
8284 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8286 stream_.callbackInfo.isRunning = false;
// If the callback thread is parked (stream stopped), wake it so it can
// observe isRunning == false and exit.
8288 MUTEX_LOCK( &stream_.mutex );
8289 if ( stream_.state == STREAM_STOPPED ) {
8290 pah->runnable = true;
8291 pthread_cond_signal( &pah->runnable_cv );
8293 MUTEX_UNLOCK( &stream_.mutex );
8295 pthread_join( pah->thread, 0 );
// Tear down the playback connection (flushing pending audio) and the
// record connection, then destroy the condition variable.
8296 if ( pah->s_play ) {
8297 pa_simple_flush( pah->s_play, NULL );
8298 pa_simple_free( pah->s_play );
8301 pa_simple_free( pah->s_rec );
8303 pthread_cond_destroy( &pah->runnable_cv );
8305 stream_.apiHandle = 0;
// Free user buffers for output (index 0) and input (index 1).
8308 if ( stream_.userBuffer[0] ) {
8309 free( stream_.userBuffer[0] );
8310 stream_.userBuffer[0] = 0;
8312 if ( stream_.userBuffer[1] ) {
8313 free( stream_.userBuffer[1] );
8314 stream_.userBuffer[1] = 0;
8317 stream_.state = STREAM_CLOSED;
8318 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped,
// run the user callback, then do the blocking pa_simple write/read for
// one buffer of audio (with format conversion when required).
8321 void RtApiPulse::callbackEvent( void )
8323 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park on the runnable condition variable while the stream is stopped;
// startStream()/closeStream() signal it.
8325 if ( stream_.state == STREAM_STOPPED ) {
8326 MUTEX_LOCK( &stream_.mutex );
8327 while ( !pah->runnable )
8328 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8330 if ( stream_.state != STREAM_RUNNING ) {
8331 MUTEX_UNLOCK( &stream_.mutex );
8334 MUTEX_UNLOCK( &stream_.mutex );
8337 if ( stream_.state == STREAM_CLOSED ) {
8338 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8339 "this shouldn't happen!";
8340 error( RtAudioError::WARNING );
// Invoke the user callback; its return value selects normal continue (0),
// drain-stop (1, handled at the end) or abort (2).
8344 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8345 double streamTime = getStreamTime();
8346 RtAudioStreamStatus status = 0;
8347 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8348 stream_.bufferSize, streamTime, status,
8349 stream_.callbackInfo.userData );
8351 if ( doStopStream == 2 ) {
8356 MUTEX_LOCK( &stream_.mutex );
// Choose the buffers handed to PulseAudio: the conversion buffer when a
// format/channel conversion is active, the user buffer otherwise.
8357 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8358 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8360 if ( stream_.state != STREAM_RUNNING )
// ---------------- Playback ----------------
8365 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8366 if ( stream_.doConvertBuffer[OUTPUT] ) {
8367 convertBuffer( stream_.deviceBuffer,
8368 stream_.userBuffer[OUTPUT],
8369 stream_.convertInfo[OUTPUT] );
8370 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8371 formatBytes( stream_.deviceFormat[OUTPUT] );
8373 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8374 formatBytes( stream_.userFormat );
// Blocking write of one buffer to the playback connection.
8376 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8377 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8378 pa_strerror( pa_error ) << ".";
8379 errorText_ = errorStream_.str();
8380 error( RtAudioError::WARNING );
// ---------------- Capture ----------------
8384 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8385 if ( stream_.doConvertBuffer[INPUT] )
8386 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8387 formatBytes( stream_.deviceFormat[INPUT] );
8389 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8390 formatBytes( stream_.userFormat );
// Blocking read of one buffer from the record connection.
8392 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8393 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8394 pa_strerror( pa_error ) << ".";
8395 errorText_ = errorStream_.str();
8396 error( RtAudioError::WARNING );
// Convert captured data into the user's format/layout if needed.
8398 if ( stream_.doConvertBuffer[INPUT] ) {
8399 convertBuffer( stream_.userBuffer[INPUT],
8400 stream_.deviceBuffer,
8401 stream_.convertInfo[INPUT] );
8406 MUTEX_UNLOCK( &stream_.mutex );
8407 RtApi::tickStreamTime();
8409 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it running and wake the
// callback thread, which waits on runnable_cv while stopped.
8413 void RtApiPulse::startStream( void )
8415 RtApi::startStream();
8416 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8418 if ( stream_.state == STREAM_CLOSED ) {
8419 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8420 error( RtAudioError::INVALID_USE );
8423 if ( stream_.state == STREAM_RUNNING ) {
8424 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8425 error( RtAudioError::WARNING );
// State change and wake-up happen under the stream mutex so the callback
// thread cannot miss the signal.
8429 MUTEX_LOCK( &stream_.mutex );
8431 stream_.state = STREAM_RUNNING;
8433 pah->runnable = true;
8434 pthread_cond_signal( &pah->runnable_cv );
8435 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: park the callback thread
// and drain any audio still queued on the playback connection.
8438 void RtApiPulse::stopStream( void )
8440 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8442 if ( stream_.state == STREAM_CLOSED ) {
8443 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8444 error( RtAudioError::INVALID_USE );
8447 if ( stream_.state == STREAM_STOPPED ) {
8448 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8449 error( RtAudioError::WARNING );
8453 stream_.state = STREAM_STOPPED;
8454 pah->runnable = false;
8455 MUTEX_LOCK( &stream_.mutex );
// Drain (play out) any buffered output before reporting the stream stopped.
8457 if ( pah && pah->s_play ) {
8459 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8460 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8461 pa_strerror( pa_error ) << ".";
8462 errorText_ = errorStream_.str();
// Unlock before raising the error so the mutex is never left held.
8463 MUTEX_UNLOCK( &stream_.mutex );
8464 error( RtAudioError::SYSTEM_ERROR );
8469 stream_.state = STREAM_STOPPED;
8470 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream: like stopStream(), but flush
// (discard) buffered output instead of draining it, so playback ends now.
8473 void RtApiPulse::abortStream( void )
8475 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8477 if ( stream_.state == STREAM_CLOSED ) {
8478 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8479 error( RtAudioError::INVALID_USE );
8482 if ( stream_.state == STREAM_STOPPED ) {
8483 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8484 error( RtAudioError::WARNING );
8488 stream_.state = STREAM_STOPPED;
8489 pah->runnable = false;
8490 MUTEX_LOCK( &stream_.mutex );
// Discard any buffered output immediately.
8492 if ( pah && pah->s_play ) {
8494 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8495 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8496 pa_strerror( pa_error ) << ".";
8497 errorText_ = errorStream_.str();
// Unlock before raising the error so the mutex is never left held.
8498 MUTEX_UNLOCK( &stream_.mutex );
8499 error( RtAudioError::SYSTEM_ERROR );
8504 stream_.state = STREAM_STOPPED;
8505 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the single PulseAudio device.
// Validates the requested parameters, sets up the stream_ bookkeeping and
// conversion buffers, opens the pa_simple connection, and (on the first
// open) creates the callback thread. Returns false on any failure.
// Called once per direction; a second call with the opposite mode
// promotes the stream to DUPLEX.
8508 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8509 unsigned int channels, unsigned int firstChannel,
8510 unsigned int sampleRate, RtAudioFormat format,
8511 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8513 PulseAudioHandle *pah = 0;
8514 unsigned long bufferBytes = 0;
// Only device 0 exists; only mono/stereo, channel offset 0 supported.
8517 if ( device != 0 ) return false;
8518 if ( mode != INPUT && mode != OUTPUT ) return false;
8519 if ( channels != 1 && channels != 2 ) {
8520 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8523 ss.channels = channels;
8525 if ( firstChannel != 0 ) return false;
// The requested rate must be one of the static SUPPORTED_SAMPLERATES.
8527 bool sr_found = false;
8528 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8529 if ( sampleRate == *sr ) {
8531 stream_.sampleRate = sampleRate;
8532 ss.rate = sampleRate;
8537 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Pick a native PulseAudio sample format when the user's format maps
// directly; otherwise fall back to float32 plus internal conversion.
8542 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8543 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8544 if ( format == sf->rtaudio_format ) {
8546 stream_.userFormat = sf->rtaudio_format;
8547 stream_.deviceFormat[mode] = stream_.userFormat;
8548 ss.format = sf->pa_format;
8552 if ( !sf_found ) { // Use internal data format conversion.
8553 stream_.userFormat = format;
8554 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8555 ss.format = PA_SAMPLE_FLOAT32LE;
8558 // Set other stream parameters.
8559 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8560 else stream_.userInterleaved = true;
8561 stream_.deviceInterleaved[mode] = true;
8562 stream_.nBuffers = 1;
8563 stream_.doByteSwap[mode] = false;
8564 stream_.nUserChannels[mode] = channels;
8565 stream_.nDeviceChannels[mode] = channels + firstChannel;
8566 stream_.channelOffset[mode] = 0;
8567 std::string streamName = "RtAudio";
8569 // Set flags for buffer conversion.
8570 stream_.doConvertBuffer[mode] = false;
8571 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8572 stream_.doConvertBuffer[mode] = true;
8573 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8574 stream_.doConvertBuffer[mode] = true;
8576 // Allocate necessary internal buffers.
8577 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8578 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8579 if ( stream_.userBuffer[mode] == NULL ) {
8580 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8583 stream_.bufferSize = *bufferSize;
// The shared device (conversion) buffer: reuse the one allocated for the
// output pass when it is already large enough for this input pass.
8585 if ( stream_.doConvertBuffer[mode] ) {
8587 bool makeBuffer = true;
8588 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8589 if ( mode == INPUT ) {
8590 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8591 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8592 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8597 bufferBytes *= *bufferSize;
8598 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8599 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8600 if ( stream_.deviceBuffer == NULL ) {
8601 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8607 stream_.device[mode] = device;
8609 // Setup the buffer conversion information structure.
8610 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Allocate the API handle on the first open; the second (duplex) open
// reuses the existing one.
8612 if ( !stream_.apiHandle ) {
8613 PulseAudioHandle *pah = new PulseAudioHandle;
8615 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8619 stream_.apiHandle = pah;
8620 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8621 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8625 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8628 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Open the PulseAudio simple connection for this direction. The record
// connection gets explicit buffer attributes (fragsize) to bound latency.
8631 pa_buffer_attr buffer_attr;
8632 buffer_attr.fragsize = bufferBytes;
8633 buffer_attr.maxlength = -1;
8635 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8636 if ( !pah->s_rec ) {
8637 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8642 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8643 if ( !pah->s_play ) {
8644 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track the overall stream mode: first open sets it, an open of the
// opposite direction upgrades it to DUPLEX.
8652 if ( stream_.mode == UNINITIALIZED )
8653 stream_.mode = mode;
8654 else if ( stream_.mode == mode )
8657 stream_.mode = DUPLEX;
// Start the callback thread once, on the first successful open.
8659 if ( !stream_.callbackInfo.isRunning ) {
8660 stream_.callbackInfo.object = this;
8661 stream_.callbackInfo.isRunning = true;
8662 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8663 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8668 stream_.state = STREAM_STOPPED;
// Error path: undo whatever was set up above before returning failure.
8672 if ( pah && stream_.callbackInfo.isRunning ) {
8673 pthread_cond_destroy( &pah->runnable_cv );
8675 stream_.apiHandle = 0;
8678 for ( int i=0; i<2; i++ ) {
8679 if ( stream_.userBuffer[i] ) {
8680 free( stream_.userBuffer[i] );
8681 stream_.userBuffer[i] = 0;
8685 if ( stream_.deviceBuffer ) {
8686 free( stream_.deviceBuffer );
8687 stream_.deviceBuffer = 0;
8693 //******************** End of __LINUX_PULSE__ *********************//
8696 #if defined(__LINUX_OSS__)
8699 #include <sys/ioctl.h>
8702 #include <sys/soundcard.h>
8706 static void *ossCallbackHandler(void * ptr);
8708 // A structure to hold various information related to the OSS API
// Per-stream OSS state: file descriptors for playback/capture devices,
// xrun flags, a triggered flag, and the runnable condition variable used
// to park the callback thread while the stream is stopped.
8711 int id[2]; // device ids
8714 pthread_cond_t runnable
// Constructor zeroes both device ids and clears the xrun flags.
8717 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor: no setup required; streams are opened lazily.
8720 RtApiOss :: RtApiOss()
8722 // Nothing to do here.
// Destructor: close any stream still open so its resources are released.
8725 RtApiOss :: ~RtApiOss()
8727 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count OSS audio devices by querying the mixer's SNDCTL_SYSINFO ioctl
// (requires OSS >= 4.0). Warnings are issued on failure.
8730 unsigned int RtApiOss :: getDeviceCount( void )
8732 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8733 if ( mixerfd == -1 ) {
8734 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8735 error( RtAudioError::WARNING );
8739 oss_sysinfo sysinfo;
8740 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8742 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8743 error( RtAudioError::WARNING );
8748 return sysinfo.numaudios;
// Probe one OSS device via the mixer ioctls: channel capabilities, native
// data formats and supported sample rates. info.probed stays false on
// any failure path; each failure emits a warning or invalid-use error.
8751 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8753 RtAudio::DeviceInfo info;
8754 info.probed = false;
8756 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8757 if ( mixerfd == -1 ) {
8758 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8759 error( RtAudioError::WARNING );
8763 oss_sysinfo sysinfo;
8764 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8765 if ( result == -1 ) {
8767 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8768 error( RtAudioError::WARNING );
// Validate the requested device index against the system device count.
8772 unsigned nDevices = sysinfo.numaudios;
8773 if ( nDevices == 0 ) {
8775 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8776 error( RtAudioError::INVALID_USE );
8780 if ( device >= nDevices ) {
8782 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8783 error( RtAudioError::INVALID_USE );
// Fetch the per-device audio info (name, caps, formats, rates).
8787 oss_audioinfo ainfo;
8789 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8791 if ( result == -1 ) {
8792 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8793 errorText_ = errorStream_.str();
8794 error( RtAudioError::WARNING );
// Channel capabilities: duplex channel count is the min of in/out.
8799 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8800 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8801 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8802 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8803 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8806 // Probe data formats ... do for input
// Map the OSS AFMT_* bit mask onto RtAudio's format flags.
8807 unsigned long mask = ainfo.iformats;
8808 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8809 info.nativeFormats |= RTAUDIO_SINT16;
8810 if ( mask & AFMT_S8 )
8811 info.nativeFormats |= RTAUDIO_SINT8;
8812 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8813 info.nativeFormats |= RTAUDIO_SINT32;
8815 if ( mask & AFMT_FLOAT )
8816 info.nativeFormats |= RTAUDIO_FLOAT32;
8818 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8819 info.nativeFormats |= RTAUDIO_SINT24;
8821 // Check that we have at least one supported format
8822 if ( info.nativeFormats == 0 ) {
8823 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8824 errorText_ = errorStream_.str();
8825 error( RtAudioError::WARNING );
8829 // Probe the supported sample rates.
8830 info.sampleRates.clear();
// If the device lists explicit rates, intersect them with RtAudio's
// SAMPLE_RATES table; prefer the highest supported rate <= 48000.
8831 if ( ainfo.nrates ) {
8832 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8833 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8834 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8835 info.sampleRates.push_back( SAMPLE_RATES[k] );
8837 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8838 info.preferredSampleRate = SAMPLE_RATES[k];
// Otherwise assume a continuous min..max range and take every table
// entry that falls inside it.
8846 // Check min and max rate values;
8847 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8848 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8849 info.sampleRates.push_back( SAMPLE_RATES[k] );
8851 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8852 info.preferredSampleRate = SAMPLE_RATES[k];
8857 if ( info.sampleRates.size() == 0 ) {
8858 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8859 errorText_ = errorStream_.str();
8860 error( RtAudioError::WARNING );
8864 info.name = ainfo.name;
// Open and configure OSS device `device` for the requested StreamMode.
// Probes the device via /dev/mixer (SNDCTL_SYSINFO / SNDCTL_AUDIOINFO),
// opens its devnode, negotiates channel count, sample format, fragment
// (buffer) size and sample rate, allocates user/device buffers, and spawns
// the callback thread. Returns true on success, FAILURE otherwise.
// NOTE(review): this numbered listing elides some blank/brace/return lines
// of the canonical source; comments below refer only to visible code.
8871 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8872 unsigned int firstChannel, unsigned int sampleRate,
8873 RtAudioFormat format, unsigned int *bufferSize,
8874 RtAudio::StreamOptions *options )
// The mixer device is only used for the sysinfo/audioinfo queries below.
8876 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8877 if ( mixerfd == -1 ) {
8878 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
// SNDCTL_SYSINFO only exists in OSS >= 4.0; failure implies an old driver.
8882 oss_sysinfo sysinfo;
8883 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8884 if ( result == -1 ) {
8886 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8890 unsigned nDevices = sysinfo.numaudios;
8891 if ( nDevices == 0 ) {
8892 // This should not happen because a check is made before this function is called.
8894 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8898 if ( device >= nDevices ) {
8899 // This should not happen because a check is made before this function is called.
8901 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8905 oss_audioinfo ainfo;
8907 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8909 if ( result == -1 ) {
// NOTE(review): message says "getDeviceInfo" — looks copy/pasted from that
// function; should read "probeDeviceOpen".
8910 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8911 errorText_ = errorStream_.str();
8915 // Check if device supports input or output
8916 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8917 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8918 if ( mode == OUTPUT )
8919 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8921 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8922 errorText_ = errorStream_.str();
// Decide the open() flags. For OSS duplex on a single device, the device
// previously opened for output must be closed and reopened read/write.
8927 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8928 if ( mode == OUTPUT )
8930 else { // mode == INPUT
8931 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8932 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8933 close( handle->id[0] );
8935 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8936 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8937 errorText_ = errorStream_.str();
8940 // Check that the number previously set channels is the same.
8941 if ( stream_.nUserChannels[0] != channels ) {
8942 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8943 errorText_ = errorStream_.str();
8952 // Set exclusive access if specified.
8953 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8955 // Try to open the device.
8957 fd = open( ainfo.devnode, flags, 0 );
8959 if ( errno == EBUSY )
8960 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8962 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8963 errorText_ = errorStream_.str();
8967 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): 'flags | O_RDWR' is always non-zero, so this branch always
// executes; almost certainly intended 'flags & O_RDWR'. Left as-is here
// (upstream behavior), but worth confirming against the canonical source.
8969 if ( flags | O_RDWR ) {
8970 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8971 if ( result == -1) {
8972 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8973 errorText_ = errorStream_.str();
8979 // Check the device channel support.
8980 stream_.nUserChannels[mode] = channels;
8981 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8983 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8984 errorText_ = errorStream_.str();
8988 // Set the number of channels.
// firstChannel is an offset into the device's channels, so the device must
// be opened with (channels + firstChannel) total channels.
8989 int deviceChannels = channels + firstChannel;
8990 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8991 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8993 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8994 errorText_ = errorStream_.str();
8997 stream_.nDeviceChannels[mode] = deviceChannels;
8999 // Get the data format mask
9001 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9002 if ( result == -1 ) {
9004 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9005 errorText_ = errorStream_.str();
9009 // Determine how to set the device format.
// Prefer the user's format in native-endian (NE) form; fall back to the
// other-endian (OE) variant with byte swapping enabled in software.
9010 stream_.userFormat = format;
9011 int deviceFormat = -1;
9012 stream_.doByteSwap[mode] = false;
9013 if ( format == RTAUDIO_SINT8 ) {
9014 if ( mask & AFMT_S8 ) {
9015 deviceFormat = AFMT_S8;
9016 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9019 else if ( format == RTAUDIO_SINT16 ) {
9020 if ( mask & AFMT_S16_NE ) {
9021 deviceFormat = AFMT_S16_NE;
9022 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9024 else if ( mask & AFMT_S16_OE ) {
9025 deviceFormat = AFMT_S16_OE;
9026 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9027 stream_.doByteSwap[mode] = true;
9030 else if ( format == RTAUDIO_SINT24 ) {
9031 if ( mask & AFMT_S24_NE ) {
9032 deviceFormat = AFMT_S24_NE;
9033 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9035 else if ( mask & AFMT_S24_OE ) {
9036 deviceFormat = AFMT_S24_OE;
9037 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9038 stream_.doByteSwap[mode] = true;
9041 else if ( format == RTAUDIO_SINT32 ) {
9042 if ( mask & AFMT_S32_NE ) {
9043 deviceFormat = AFMT_S32_NE;
9044 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9046 else if ( mask & AFMT_S32_OE ) {
9047 deviceFormat = AFMT_S32_OE;
9048 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9049 stream_.doByteSwap[mode] = true;
9053 if ( deviceFormat == -1 ) {
9054 // The user requested format is not natively supported by the device.
// Fallback order: 16-bit NE, 32-bit NE, 24-bit NE, then the OE variants
// (with byte swap), then 8-bit. RtAudio converts between user and device
// formats later via convertBuffer().
9055 if ( mask & AFMT_S16_NE ) {
9056 deviceFormat = AFMT_S16_NE;
9057 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9059 else if ( mask & AFMT_S32_NE ) {
9060 deviceFormat = AFMT_S32_NE;
9061 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9063 else if ( mask & AFMT_S24_NE ) {
9064 deviceFormat = AFMT_S24_NE;
9065 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9067 else if ( mask & AFMT_S16_OE ) {
9068 deviceFormat = AFMT_S16_OE;
9069 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9070 stream_.doByteSwap[mode] = true;
9072 else if ( mask & AFMT_S32_OE ) {
9073 deviceFormat = AFMT_S32_OE;
9074 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9075 stream_.doByteSwap[mode] = true;
9077 else if ( mask & AFMT_S24_OE ) {
9078 deviceFormat = AFMT_S24_OE;
9079 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9080 stream_.doByteSwap[mode] = true;
9082 else if ( mask & AFMT_S8) {
9083 deviceFormat = AFMT_S8;
9084 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9088 if ( stream_.deviceFormat[mode] == 0 ) {
9089 // This really shouldn't happen ...
9091 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9092 errorText_ = errorStream_.str();
9096 // Set the data format.
// SNDCTL_DSP_SETFMT may silently substitute a different format; treat any
// substitution (deviceFormat != temp) as failure.
9097 int temp = deviceFormat;
9098 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9099 if ( result == -1 || deviceFormat != temp ) {
9101 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9102 errorText_ = errorStream_.str();
9106 // Attempt to set the buffer size. According to OSS, the minimum
9107 // number of buffers is two. The supposed minimum buffer size is 16
9108 // bytes, so that will be our lower bound. The argument to this
9109 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9110 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9111 // We'll check the actual value used near the end of the setup
9113 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9114 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9116 if ( options ) buffers = options->numberOfBuffers;
9117 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
// NOTE(review): default/fallback is 3 buffers, not the OSS minimum of 2 —
// presumably deliberate headroom; confirm against canonical source.
9118 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(ossBufferBytes): the SSSS (size) exponent.
9119 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9120 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9121 if ( result == -1 ) {
9123 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9124 errorText_ = errorStream_.str();
9127 stream_.nBuffers = buffers;
9129 // Save buffer size (in sample frames).
9130 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9131 stream_.bufferSize = *bufferSize;
9133 // Set the sample rate.
9134 int srate = sampleRate;
9135 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9136 if ( result == -1 ) {
9138 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9139 errorText_ = errorStream_.str();
9143 // Verify the sample rate setup worked.
// Allow up to 100 Hz deviation from the requested rate (OSS may round).
9144 if ( abs( srate - (int)sampleRate ) > 100 ) {
9146 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9147 errorText_ = errorStream_.str();
9150 stream_.sampleRate = sampleRate;
9152 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9153 // We're doing duplex setup here.
// Single fd serves both directions, so output inherits input's settings.
9154 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9155 stream_.nDeviceChannels[0] = deviceChannels;
9158 // Set interleaving parameters.
9159 stream_.userInterleaved = true;
9160 stream_.deviceInterleaved[mode] = true;
9161 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9162 stream_.userInterleaved = false;
9164 // Set flags for buffer conversion
9165 stream_.doConvertBuffer[mode] = false;
9166 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9167 stream_.doConvertBuffer[mode] = true;
9168 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9169 stream_.doConvertBuffer[mode] = true;
9170 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9171 stream_.nUserChannels[mode] > 1 )
9172 stream_.doConvertBuffer[mode] = true;
9174 // Allocate the stream handles if necessary and then save.
9175 if ( stream_.apiHandle == 0 ) {
9177 handle = new OssHandle;
9179 catch ( std::bad_alloc& ) {
9180 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9184 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9185 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9189 stream_.apiHandle = (void *) handle;
9192 handle = (OssHandle *) stream_.apiHandle;
9194 handle->id[mode] = fd;
9196 // Allocate necessary internal buffers.
9197 unsigned long bufferBytes;
9198 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9199 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9200 if ( stream_.userBuffer[mode] == NULL ) {
9201 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9205 if ( stream_.doConvertBuffer[mode] ) {
9207 bool makeBuffer = true;
9208 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9209 if ( mode == INPUT ) {
// Reuse the output device buffer if it is already big enough for input.
9210 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9211 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9212 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9217 bufferBytes *= *bufferSize;
9218 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9219 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9220 if ( stream_.deviceBuffer == NULL ) {
9221 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9227 stream_.device[mode] = device;
9228 stream_.state = STREAM_STOPPED;
9230 // Setup the buffer conversion information structure.
9231 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9233 // Setup thread if necessary.
9234 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9235 // We had already set up an output stream.
9236 stream_.mode = DUPLEX;
9237 if ( stream_.device[0] == device ) handle->id[0] = fd;
9240 stream_.mode = mode;
9242 // Setup callback thread.
9243 stream_.callbackInfo.object = (void *) this;
9245 // Set the thread attributes for joinable and realtime scheduling
9246 // priority. The higher priority will only take affect if the
9247 // program is run as root or suid.
9248 pthread_attr_t attr;
9249 pthread_attr_init( &attr );
9250 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9251 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9252 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9253 struct sched_param param;
9254 int priority = options->priority;
// Clamp the user-requested priority into the valid SCHED_RR range.
9255 int min = sched_get_priority_min( SCHED_RR );
9256 int max = sched_get_priority_max( SCHED_RR );
9257 if ( priority < min ) priority = min;
9258 else if ( priority > max ) priority = max;
9259 param.sched_priority = priority;
9260 pthread_attr_setschedparam( &attr, ¶m );
9261 pthread_attr_setschedpolicy( &attr, SCHED_RR );
9264 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9266 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9269 stream_.callbackInfo.isRunning = true;
9270 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9271 pthread_attr_destroy( &attr );
9273 stream_.callbackInfo.isRunning = false;
9274 errorText_ = "RtApiOss::error creating callback thread!";
// Error cleanup: release the condition variable, fds, handle and buffers
// allocated above before returning FAILURE.
9283 pthread_cond_destroy( &handle->runnable );
9284 if ( handle->id[0] ) close( handle->id[0] );
9285 if ( handle->id[1] ) close( handle->id[1] );
9287 stream_.apiHandle = 0;
9290 for ( int i=0; i<2; i++ ) {
9291 if ( stream_.userBuffer[i] ) {
9292 free( stream_.userBuffer[i] );
9293 stream_.userBuffer[i] = 0;
9297 if ( stream_.deviceBuffer ) {
9298 free( stream_.deviceBuffer );
9299 stream_.deviceBuffer = 0;
// Close the stream: stop the callback thread (waking it first if the
// stream is stopped and the thread is blocked on the condition variable),
// halt any running DSP transfer, and release all handles and buffers.
9305 void RtApiOss :: closeStream()
9307 if ( stream_.state == STREAM_CLOSED ) {
9308 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9309 error( RtAudioError::WARNING );
9313 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clear the run flag, then signal in case callbackEvent() is blocked in
// pthread_cond_wait(), so pthread_join() below cannot deadlock.
9314 stream_.callbackInfo.isRunning = false;
9315 MUTEX_LOCK( &stream_.mutex );
9316 if ( stream_.state == STREAM_STOPPED )
9317 pthread_cond_signal( &handle->runnable );
9318 MUTEX_UNLOCK( &stream_.mutex );
9319 pthread_join( stream_.callbackInfo.thread, NULL );
9321 if ( stream_.state == STREAM_RUNNING ) {
9322 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9323 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9325 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9326 stream_.state = STREAM_STOPPED;
// Destroy the condition variable and close both device descriptors
// (id[0] = output, id[1] = input; they may be the same fd in duplex).
9330 pthread_cond_destroy( &handle->runnable );
9331 if ( handle->id[0] ) close( handle->id[0] );
9332 if ( handle->id[1] ) close( handle->id[1] );
9334 stream_.apiHandle = 0;
9337 for ( int i=0; i<2; i++ ) {
9338 if ( stream_.userBuffer[i] ) {
9339 free( stream_.userBuffer[i] );
9340 stream_.userBuffer[i] = 0;
9344 if ( stream_.deviceBuffer ) {
9345 free( stream_.deviceBuffer );
9346 stream_.deviceBuffer = 0;
9349 stream_.mode = UNINITIALIZED;
9350 stream_.state = STREAM_CLOSED;
// Start the stream. OSS needs no explicit start ioctl — playback begins
// when samples are written — so this just flips the state and wakes the
// callback thread waiting on the condition variable.
9353 void RtApiOss :: startStream()
9356 RtApi::startStream();
9357 if ( stream_.state == STREAM_RUNNING ) {
9358 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9359 error( RtAudioError::WARNING );
9363 MUTEX_LOCK( &stream_.mutex );
9365 stream_.state = STREAM_RUNNING;
9367 // No need to do anything else here ... OSS automatically starts
9368 // when fed samples.
9370 MUTEX_UNLOCK( &stream_.mutex );
// Wake callbackEvent(), which blocks on this condvar while STOPPED.
9372 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9373 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: drain the output by writing zero-filled
// buffers (nBuffers+1 of them) before halting the DSP, then halt input if
// it uses a separate descriptor. Compare abortStream(), which halts
// immediately without flushing.
9376 void RtApiOss :: stopStream()
9379 if ( stream_.state == STREAM_STOPPED ) {
9380 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9381 error( RtAudioError::WARNING );
9385 MUTEX_LOCK( &stream_.mutex );
9387 // The state might change while waiting on a mutex.
9388 if ( stream_.state == STREAM_STOPPED ) {
9389 MUTEX_UNLOCK( &stream_.mutex );
9394 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9395 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9397 // Flush the output with zeros a few times.
9400 RtAudioFormat format;
// Pick whichever buffer actually feeds the device (converted or user).
9402 if ( stream_.doConvertBuffer[0] ) {
9403 buffer = stream_.deviceBuffer;
9404 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9405 format = stream_.deviceFormat[0];
9408 buffer = stream_.userBuffer[0];
9409 samples = stream_.bufferSize * stream_.nUserChannels[0];
9410 format = stream_.userFormat;
9413 memset( buffer, 0, samples * formatBytes(format) );
9414 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9415 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9416 if ( result == -1 ) {
9417 errorText_ = "RtApiOss::stopStream: audio write error.";
9418 error( RtAudioError::WARNING );
9422 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9423 if ( result == -1 ) {
9424 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9425 errorText_ = errorStream_.str();
9428 handle->triggered = false;
// Halt input only when it has its own descriptor (non-duplex-shared fd).
9431 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9432 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9433 if ( result == -1 ) {
// NOTE(review): this input-halt message reports stream_.device[0];
// presumably it should report stream_.device[1].
9434 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9435 errorText_ = errorStream_.str();
9441 stream_.state = STREAM_STOPPED;
9442 MUTEX_UNLOCK( &stream_.mutex );
// Raise SYSTEM_ERROR (after unlocking) if any halt ioctl above failed.
9444 if ( result != -1 ) return;
9445 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: halt input/output immediately via SNDCTL_DSP_HALT
// without draining pending output (unlike stopStream()).
9448 void RtApiOss :: abortStream()
9451 if ( stream_.state == STREAM_STOPPED ) {
9452 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9453 error( RtAudioError::WARNING );
9457 MUTEX_LOCK( &stream_.mutex );
9459 // The state might change while waiting on a mutex.
9460 if ( stream_.state == STREAM_STOPPED ) {
9461 MUTEX_UNLOCK( &stream_.mutex );
9466 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9467 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9468 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9469 if ( result == -1 ) {
9470 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9471 errorText_ = errorStream_.str();
9474 handle->triggered = false;
// Halt input only when it has its own descriptor (non-duplex-shared fd).
9477 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9478 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9479 if ( result == -1 ) {
// NOTE(review): this input-halt message reports stream_.device[0];
// presumably it should report stream_.device[1].
9480 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9481 errorText_ = errorStream_.str();
9487 stream_.state = STREAM_STOPPED;
9488 MUTEX_UNLOCK( &stream_.mutex );
// Raise SYSTEM_ERROR (after unlocking) if any halt ioctl above failed.
9490 if ( result != -1 ) return;
9491 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread loop: wait while stopped, invoke
// the user callback, then write output to / read input from the device,
// doing format conversion and byte swapping as configured.
9494 void RtApiOss :: callbackEvent()
9496 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9497 if ( stream_.state == STREAM_STOPPED ) {
9498 MUTEX_LOCK( &stream_.mutex );
// NOTE(review): pthread_cond_wait is not wrapped in a predicate loop; the
// state re-check below is what guards against spurious wakeups here.
9499 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9500 if ( stream_.state != STREAM_RUNNING ) {
9501 MUTEX_UNLOCK( &stream_.mutex );
9504 MUTEX_UNLOCK( &stream_.mutex );
9507 if ( stream_.state == STREAM_CLOSED ) {
9508 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9509 error( RtAudioError::WARNING );
9513 // Invoke user callback to get fresh output data.
9514 int doStopStream = 0;
9515 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9516 double streamTime = getStreamTime();
9517 RtAudioStreamStatus status = 0;
// Report (and clear) any under/overflow detected on the previous cycle.
9518 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9519 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9520 handle->xrun[0] = false;
9522 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9523 status |= RTAUDIO_INPUT_OVERFLOW;
9524 handle->xrun[1] = false;
9526 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9527 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return value 2 requests an immediate abort (no drain).
9528 if ( doStopStream == 2 ) {
9529 this->abortStream();
9533 MUTEX_LOCK( &stream_.mutex );
9535 // The state might change while waiting on a mutex.
9536 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9541 RtAudioFormat format;
9543 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9545 // Setup parameters and do buffer conversion if necessary.
9546 if ( stream_.doConvertBuffer[0] ) {
9547 buffer = stream_.deviceBuffer;
9548 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9549 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9550 format = stream_.deviceFormat[0];
9553 buffer = stream_.userBuffer[0];
9554 samples = stream_.bufferSize * stream_.nUserChannels[0];
9555 format = stream_.userFormat;
9558 // Do byte swapping if necessary.
9559 if ( stream_.doByteSwap[0] )
9560 byteSwapBuffer( buffer, samples, format );
// First duplex cycle: prime the device with one buffer while triggering is
// disabled, then enable input+output triggers together so the two
// directions start in sync.
9562 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9564 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9565 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9566 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9567 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9568 handle->triggered = true;
9571 // Write samples to device.
9572 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9574 if ( result == -1 ) {
9575 // We'll assume this is an underrun, though there isn't a
9576 // specific means for determining that.
9577 handle->xrun[0] = true;
9578 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9579 error( RtAudioError::WARNING );
9580 // Continue on to input section.
9584 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9586 // Setup parameters.
9587 if ( stream_.doConvertBuffer[1] ) {
9588 buffer = stream_.deviceBuffer;
9589 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9590 format = stream_.deviceFormat[1];
9593 buffer = stream_.userBuffer[1];
9594 samples = stream_.bufferSize * stream_.nUserChannels[1];
9595 format = stream_.userFormat;
9598 // Read samples from device.
9599 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9601 if ( result == -1 ) {
9602 // We'll assume this is an overrun, though there isn't a
9603 // specific means for determining that.
9604 handle->xrun[1] = true;
9605 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9606 error( RtAudioError::WARNING );
9610 // Do byte swapping if necessary.
9611 if ( stream_.doByteSwap[1] )
9612 byteSwapBuffer( buffer, samples, format );
9614 // Do buffer conversion if necessary.
9615 if ( stream_.doConvertBuffer[1] )
9616 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9620 MUTEX_UNLOCK( &stream_.mutex );
9622 RtApi::tickStreamTime();
// Callback return value 1 requests a graceful stop (with output drain).
9623 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the OSS callback thread: loop on the isRunning
// flag (cleared by closeStream()/error()), dispatching each audio cycle
// to RtApiOss::callbackEvent() with a cancellation point per iteration.
9626 static void *ossCallbackHandler( void *ptr )
9628 CallbackInfo *info = (CallbackInfo *) ptr;
9629 RtApiOss *object = (RtApiOss *) info->object;
9630 bool *isRunning = &info->isRunning;
9632 while ( *isRunning == true ) {
9633 pthread_testcancel();
9634 object->callbackEvent();
9637 pthread_exit( NULL );
9640 //******************** End of __LINUX_OSS__ *********************//
9644 // *************************************************** //
9646 // Protected common (OS-independent) RtAudio methods.
9648 // *************************************************** //
9650 // This method can be modified to control the behavior of error
9651 // message printing.
// Central error dispatcher: routes errorText_ either to the user-supplied
// error callback (reporting only the first error of a cascade) or, absent
// a callback, prints WARNINGs to stderr and throws RtAudioError otherwise.
9652 void RtApi :: error( RtAudioError::Type type )
9654 errorStream_.str(""); // clear the ostringstream
9656 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9657 if ( errorCallback ) {
9658 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9660 if ( firstErrorOccurred_ )
9663 firstErrorOccurred_ = true;
// Copy the message before any nested call can overwrite errorText_.
9664 const std::string errorMessage = errorText_;
// Non-warning error on an active stream: tell the callback thread to exit.
9666 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9667 stream_.callbackInfo.isRunning = false; // exit from the thread
9671 errorCallback( type, errorMessage );
9672 firstErrorOccurred_ = false;
9676 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9677 std::cerr << '\n' << errorText_ << "\n\n";
9678 else if ( type != RtAudioError::WARNING )
9679 throw( RtAudioError( errorText_, type ) );
// Guard used by public stream methods: raise INVALID_USE (which throws via
// error()) when no stream is currently open.
9682 void RtApi :: verifyStream()
9684 if ( stream_.state == STREAM_CLOSED ) {
9685 errorText_ = "RtApi:: a stream is not open!";
9686 error( RtAudioError::INVALID_USE );
// Reset every field of the stream_ structure to its "no stream open"
// default. Called to (re)initialize state; pointers are nulled, not freed,
// so buffers must be released before calling this.
9690 void RtApi :: clearStreamInfo()
9692 stream_.mode = UNINITIALIZED;
9693 stream_.state = STREAM_CLOSED;
9694 stream_.sampleRate = 0;
9695 stream_.bufferSize = 0;
9696 stream_.nBuffers = 0;
9697 stream_.userFormat = 0;
9698 stream_.userInterleaved = true;
9699 stream_.streamTime = 0.0;
9700 stream_.apiHandle = 0;
9701 stream_.deviceBuffer = 0;
9702 stream_.callbackInfo.callback = 0;
9703 stream_.callbackInfo.userData = 0;
9704 stream_.callbackInfo.isRunning = false;
9705 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = output, 1 = input.
9706 for ( int i=0; i<2; i++ ) {
// 11111 is a sentinel meaning "no device assigned" (valid IDs are small).
9707 stream_.device[i] = 11111;
9708 stream_.doConvertBuffer[i] = false;
9709 stream_.deviceInterleaved[i] = true;
9710 stream_.doByteSwap[i] = false;
9711 stream_.nUserChannels[i] = 0;
9712 stream_.nDeviceChannels[i] = 0;
9713 stream_.channelOffset[i] = 0;
9714 stream_.deviceFormat[i] = 0;
9715 stream_.latency[i] = 0;
9716 stream_.userBuffer[i] = 0;
9717 stream_.convertInfo[i].channels = 0;
9718 stream_.convertInfo[i].inJump = 0;
9719 stream_.convertInfo[i].outJump = 0;
9720 stream_.convertInfo[i].inFormat = 0;
9721 stream_.convertInfo[i].outFormat = 0;
9722 stream_.convertInfo[i].inOffset.clear();
9723 stream_.convertInfo[i].outOffset.clear();
// Return the byte width of a single sample of the given RtAudioFormat.
// (The individual return statements are elided in this numbered listing;
// per the header comments RtAudio stores SINT24 in the lower three bytes
// of a 32-bit field — confirm widths against the canonical source.)
// Unknown formats fall through to the WARNING below.
9727 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9729 if ( format == RTAUDIO_SINT16 )
9731 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9733 else if ( format == RTAUDIO_FLOAT64 )
9735 else if ( format == RTAUDIO_SINT24 )
9737 else if ( format == RTAUDIO_SINT8 )
9740 errorText_ = "RtApi::formatBytes: undefined format.";
9741 error( RtAudioError::WARNING );
// Populate stream_.convertInfo[mode] for convertBuffer(): source/target
// formats, per-frame jumps, channel count, and per-channel sample offsets
// implementing interleaving/deinterleaving plus the firstChannel shift.
9746 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9748 if ( mode == INPUT ) { // convert device to user buffer
9749 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9750 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9751 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9752 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9754 else { // convert user to device buffer
9755 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9756 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9757 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9758 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Convert only as many channels as both sides actually have.
9761 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9762 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9764 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9766 // Set up the interleave/deinterleave offsets.
9767 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9768 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9769 ( mode == INPUT && stream_.userInterleaved ) ) {
// Non-interleaved source -> interleaved destination: source channels are
// planar (offset k * bufferSize, jump 1), destination is frame-packed.
9770 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9771 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9772 stream_.convertInfo[mode].outOffset.push_back( k );
9773 stream_.convertInfo[mode].inJump = 1;
// Interleaved source -> non-interleaved (planar) destination.
9777 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9778 stream_.convertInfo[mode].inOffset.push_back( k );
9779 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9780 stream_.convertInfo[mode].outJump = 1;
9784 else { // no (de)interleaving
9785 if ( stream_.userInterleaved ) {
9786 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9787 stream_.convertInfo[mode].inOffset.push_back( k );
9788 stream_.convertInfo[mode].outOffset.push_back( k );
// Both sides planar: unit jumps, channel planes bufferSize apart.
9792 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9793 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9794 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9795 stream_.convertInfo[mode].inJump = 1;
9796 stream_.convertInfo[mode].outJump = 1;
9801 // Add channel offset.
// Shift the device-side offsets so the user's channel 0 maps onto device
// channel `firstChannel` (sample offset for interleaved, whole planes for
// non-interleaved).
9802 if ( firstChannel > 0 ) {
9803 if ( stream_.deviceInterleaved[mode] ) {
9804 if ( mode == OUTPUT ) {
9805 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9806 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9809 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9810 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9814 if ( mode == OUTPUT ) {
9815 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9816 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9819 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9820 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9826 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9828 // This function does format conversion, input/output channel compensation, and
9829 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9830 // the lower three bytes of a 32-bit integer.
9832 // Clear our device buffer when in/out duplex device channels are different
9833 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9834 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9835 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9838 if (info.outFormat == RTAUDIO_FLOAT64) {
9840 Float64 *out = (Float64 *)outBuffer;
9842 if (info.inFormat == RTAUDIO_SINT8) {
9843 signed char *in = (signed char *)inBuffer;
9844 scale = 1.0 / 127.5;
9845 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9846 for (j=0; j<info.channels; j++) {
9847 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9848 out[info.outOffset[j]] += 0.5;
9849 out[info.outOffset[j]] *= scale;
9852 out += info.outJump;
9855 else if (info.inFormat == RTAUDIO_SINT16) {
9856 Int16 *in = (Int16 *)inBuffer;
9857 scale = 1.0 / 32767.5;
9858 for (unsigned int i=0; i<stream_.bufferSize; i++) {
// NOTE(review): this extracted chunk is truncated — the numeric prefixes
// (e.g. "9859") are leftover line numbers from the extraction, and a number
// of statements and closing braces between the visible lines are missing.
// The comments below describe only what the visible lines show; restore the
// full text from the canonical source before building.
//
// Interior of RtApi::convertBuffer(): converts stream_.bufferSize frames
// from info.inFormat to info.outFormat, using the per-channel offset maps
// info.inOffset[] / info.outOffset[] for channel compensation and
// (de)interleaving, and advancing the output pointer by info.outJump per
// frame.  The visible int->float branches map the signed integer range to
// floating point via "+ 0.5, then * (1 / (2^(bits-1) - 0.5))"; the visible
// float->int branches invert that with "* (2^(bits-1) - 0.5) - 0.5".
//
// (continuation of an int -> FLOAT64 conversion loop whose header — input
// pointer and scale assignment — lies above this chunk)
9859 for (j=0; j<info.channels; j++) {
9860 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9861 out[info.outOffset[j]] += 0.5;
9862 out[info.outOffset[j]] *= scale;
9865 out += info.outJump;
// SINT24 -> FLOAT64: Int24 is a helper class; asInt() yields the
// sign-extended 24-bit integer value before scaling.
9868 else if (info.inFormat == RTAUDIO_SINT24) {
9869 Int24 *in = (Int24 *)inBuffer;
9870 scale = 1.0 / 8388607.5;
9871 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9872 for (j=0; j<info.channels; j++) {
9873 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9874 out[info.outOffset[j]] += 0.5;
9875 out[info.outOffset[j]] *= scale;
9878 out += info.outJump;
// SINT32 -> FLOAT64.
9881 else if (info.inFormat == RTAUDIO_SINT32) {
9882 Int32 *in = (Int32 *)inBuffer;
9883 scale = 1.0 / 2147483647.5;
9884 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9885 for (j=0; j<info.channels; j++) {
9886 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9887 out[info.outOffset[j]] += 0.5;
9888 out[info.outOffset[j]] *= scale;
9891 out += info.outJump;
// FLOAT32 -> FLOAT64: pure widening cast, no rescaling needed.
9894 else if (info.inFormat == RTAUDIO_FLOAT32) {
9895 Float32 *in = (Float32 *)inBuffer;
9896 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9897 for (j=0; j<info.channels; j++) {
9898 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9901 out += info.outJump;
// FLOAT64 -> FLOAT64: same format; only remap/(de)interleave channels.
9904 else if (info.inFormat == RTAUDIO_FLOAT64) {
9905 // Channel compensation and/or (de)interleaving only.
9906 Float64 *in = (Float64 *)inBuffer;
9907 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9908 for (j=0; j<info.channels; j++) {
9909 out[info.outOffset[j]] = in[info.inOffset[j]];
9912 out += info.outJump;
// ---- Output format: FLOAT32 ----
9916 else if (info.outFormat == RTAUDIO_FLOAT32) {
9918 Float32 *out = (Float32 *)outBuffer;
// SINT8 -> FLOAT32.
9920 if (info.inFormat == RTAUDIO_SINT8) {
9921 signed char *in = (signed char *)inBuffer;
9922 scale = (Float32) ( 1.0 / 127.5 );
9923 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9924 for (j=0; j<info.channels; j++) {
9925 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9926 out[info.outOffset[j]] += 0.5;
9927 out[info.outOffset[j]] *= scale;
9930 out += info.outJump;
// SINT16 -> FLOAT32.
9933 else if (info.inFormat == RTAUDIO_SINT16) {
9934 Int16 *in = (Int16 *)inBuffer;
9935 scale = (Float32) ( 1.0 / 32767.5 );
9936 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9937 for (j=0; j<info.channels; j++) {
9938 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9939 out[info.outOffset[j]] += 0.5;
9940 out[info.outOffset[j]] *= scale;
9943 out += info.outJump;
// SINT24 -> FLOAT32.
9946 else if (info.inFormat == RTAUDIO_SINT24) {
9947 Int24 *in = (Int24 *)inBuffer;
9948 scale = (Float32) ( 1.0 / 8388607.5 );
9949 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9950 for (j=0; j<info.channels; j++) {
9951 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9952 out[info.outOffset[j]] += 0.5;
9953 out[info.outOffset[j]] *= scale;
9956 out += info.outJump;
// SINT32 -> FLOAT32.
9959 else if (info.inFormat == RTAUDIO_SINT32) {
9960 Int32 *in = (Int32 *)inBuffer;
9961 scale = (Float32) ( 1.0 / 2147483647.5 );
9962 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9963 for (j=0; j<info.channels; j++) {
9964 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9965 out[info.outOffset[j]] += 0.5;
9966 out[info.outOffset[j]] *= scale;
9969 out += info.outJump;
// FLOAT32 -> FLOAT32: same format; only remap/(de)interleave channels.
9972 else if (info.inFormat == RTAUDIO_FLOAT32) {
9973 // Channel compensation and/or (de)interleaving only.
9974 Float32 *in = (Float32 *)inBuffer;
9975 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9976 for (j=0; j<info.channels; j++) {
9977 out[info.outOffset[j]] = in[info.inOffset[j]];
9980 out += info.outJump;
// FLOAT64 -> FLOAT32: narrowing cast (precision loss is expected here).
9983 else if (info.inFormat == RTAUDIO_FLOAT64) {
9984 Float64 *in = (Float64 *)inBuffer;
9985 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9986 for (j=0; j<info.channels; j++) {
9987 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9990 out += info.outJump;
// ---- Output format: SINT32 ----
// Int -> wider-int branches shift left to occupy the full 32-bit range.
// NOTE(review): "value <<= 24" on a negative Int32 is technically UB
// before C++20 (works on common compilers) — confirm against the
// canonical upstream before changing.
9994 else if (info.outFormat == RTAUDIO_SINT32) {
9995 Int32 *out = (Int32 *)outBuffer;
9996 if (info.inFormat == RTAUDIO_SINT8) {
9997 signed char *in = (signed char *)inBuffer;
9998 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9999 for (j=0; j<info.channels; j++) {
10000 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10001 out[info.outOffset[j]] <<= 24;
10004 out += info.outJump;
10007 else if (info.inFormat == RTAUDIO_SINT16) {
10008 Int16 *in = (Int16 *)inBuffer;
10009 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10010 for (j=0; j<info.channels; j++) {
10011 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10012 out[info.outOffset[j]] <<= 16;
10015 out += info.outJump;
10018 else if (info.inFormat == RTAUDIO_SINT24) {
10019 Int24 *in = (Int24 *)inBuffer;
10020 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10021 for (j=0; j<info.channels; j++) {
10022 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10023 out[info.outOffset[j]] <<= 8;
10026 out += info.outJump;
10029 else if (info.inFormat == RTAUDIO_SINT32) {
10030 // Channel compensation and/or (de)interleaving only.
10031 Int32 *in = (Int32 *)inBuffer;
10032 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10033 for (j=0; j<info.channels; j++) {
10034 out[info.outOffset[j]] = in[info.inOffset[j]];
10037 out += info.outJump;
// Float -> SINT32: inverse of the int->float mapping above.
// NOTE(review): no clipping is applied; out-of-range float input
// (|x| > 1.0) overflows the integer cast — presumably callers guarantee
// normalized samples; verify upstream.
10040 else if (info.inFormat == RTAUDIO_FLOAT32) {
10041 Float32 *in = (Float32 *)inBuffer;
10042 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10043 for (j=0; j<info.channels; j++) {
10044 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10047 out += info.outJump;
10050 else if (info.inFormat == RTAUDIO_FLOAT64) {
10051 Float64 *in = (Float64 *)inBuffer;
10052 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10053 for (j=0; j<info.channels; j++) {
10054 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10057 out += info.outJump;
// ---- Output format: SINT24 (stored via the Int24 helper class) ----
10061 else if (info.outFormat == RTAUDIO_SINT24) {
10062 Int24 *out = (Int24 *)outBuffer;
10063 if (info.inFormat == RTAUDIO_SINT8) {
10064 signed char *in = (signed char *)inBuffer;
10065 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10066 for (j=0; j<info.channels; j++) {
10067 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10068 //out[info.outOffset[j]] <<= 16;
10071 out += info.outJump;
10074 else if (info.inFormat == RTAUDIO_SINT16) {
10075 Int16 *in = (Int16 *)inBuffer;
10076 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10077 for (j=0; j<info.channels; j++) {
10078 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10079 //out[info.outOffset[j]] <<= 8;
10082 out += info.outJump;
10085 else if (info.inFormat == RTAUDIO_SINT24) {
10086 // Channel compensation and/or (de)interleaving only.
10087 Int24 *in = (Int24 *)inBuffer;
10088 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10089 for (j=0; j<info.channels; j++) {
10090 out[info.outOffset[j]] = in[info.inOffset[j]];
10093 out += info.outJump;
// SINT32 -> SINT24: drop the low 8 bits (arithmetic shift on signed).
10096 else if (info.inFormat == RTAUDIO_SINT32) {
10097 Int32 *in = (Int32 *)inBuffer;
10098 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10099 for (j=0; j<info.channels; j++) {
10100 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10101 //out[info.outOffset[j]] >>= 8;
10104 out += info.outJump;
10107 else if (info.inFormat == RTAUDIO_FLOAT32) {
10108 Float32 *in = (Float32 *)inBuffer;
10109 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10110 for (j=0; j<info.channels; j++) {
10111 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10114 out += info.outJump;
10117 else if (info.inFormat == RTAUDIO_FLOAT64) {
10118 Float64 *in = (Float64 *)inBuffer;
10119 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10120 for (j=0; j<info.channels; j++) {
10121 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10124 out += info.outJump;
// ---- Output format: SINT16 ----
10128 else if (info.outFormat == RTAUDIO_SINT16) {
10129 Int16 *out = (Int16 *)outBuffer;
10130 if (info.inFormat == RTAUDIO_SINT8) {
10131 signed char *in = (signed char *)inBuffer;
10132 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10133 for (j=0; j<info.channels; j++) {
10134 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10135 out[info.outOffset[j]] <<= 8;
10138 out += info.outJump;
10141 else if (info.inFormat == RTAUDIO_SINT16) {
10142 // Channel compensation and/or (de)interleaving only.
10143 Int16 *in = (Int16 *)inBuffer;
10144 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10145 for (j=0; j<info.channels; j++) {
10146 out[info.outOffset[j]] = in[info.inOffset[j]];
10149 out += info.outJump;
// Wider int -> SINT16: keep the most-significant 16 bits.
10152 else if (info.inFormat == RTAUDIO_SINT24) {
10153 Int24 *in = (Int24 *)inBuffer;
10154 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10155 for (j=0; j<info.channels; j++) {
10156 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10159 out += info.outJump;
10162 else if (info.inFormat == RTAUDIO_SINT32) {
10163 Int32 *in = (Int32 *)inBuffer;
10164 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10165 for (j=0; j<info.channels; j++) {
10166 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10169 out += info.outJump;
10172 else if (info.inFormat == RTAUDIO_FLOAT32) {
10173 Float32 *in = (Float32 *)inBuffer;
10174 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10175 for (j=0; j<info.channels; j++) {
10176 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10179 out += info.outJump;
10182 else if (info.inFormat == RTAUDIO_FLOAT64) {
10183 Float64 *in = (Float64 *)inBuffer;
10184 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10185 for (j=0; j<info.channels; j++) {
10186 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10189 out += info.outJump;
// ---- Output format: SINT8 ----
10193 else if (info.outFormat == RTAUDIO_SINT8) {
10194 signed char *out = (signed char *)outBuffer;
10195 if (info.inFormat == RTAUDIO_SINT8) {
10196 // Channel compensation and/or (de)interleaving only.
10197 signed char *in = (signed char *)inBuffer;
10198 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10199 for (j=0; j<info.channels; j++) {
10200 out[info.outOffset[j]] = in[info.inOffset[j]];
10203 out += info.outJump;
// NOTE(review): upstream uses plain "if" (not "else if") here for SINT16;
// preserved as-is — behavior is unchanged because the formats are mutually
// exclusive, but confirm against the canonical source.
10206 if (info.inFormat == RTAUDIO_SINT16) {
10207 Int16 *in = (Int16 *)inBuffer;
10208 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10209 for (j=0; j<info.channels; j++) {
10210 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10213 out += info.outJump;
10216 else if (info.inFormat == RTAUDIO_SINT24) {
10217 Int24 *in = (Int24 *)inBuffer;
10218 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10219 for (j=0; j<info.channels; j++) {
10220 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10223 out += info.outJump;
10226 else if (info.inFormat == RTAUDIO_SINT32) {
10227 Int32 *in = (Int32 *)inBuffer;
10228 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10229 for (j=0; j<info.channels; j++) {
10230 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10233 out += info.outJump;
10236 else if (info.inFormat == RTAUDIO_FLOAT32) {
10237 Float32 *in = (Float32 *)inBuffer;
10238 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10239 for (j=0; j<info.channels; j++) {
10240 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10243 out += info.outJump;
10246 else if (info.inFormat == RTAUDIO_FLOAT64) {
10247 Float64 *in = (Float64 *)inBuffer;
10248 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10249 for (j=0; j<info.channels; j++) {
10250 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10253 out += info.outJump;
10259 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10260 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10261 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
// Reverse the byte order (endianness) of each sample in 'buffer' in place.
// 'samples' is the total number of samples (frames * channels); 'format'
// selects the per-sample width: 2 bytes (SINT16), 3 (SINT24), 4
// (SINT32/FLOAT32), or 8 (FLOAT64).  A byte-wise pointer walks the buffer,
// swapping mirrored byte pairs within each sample and then advancing to the
// next sample.
//
// NOTE(review): this extracted chunk is truncated — the numeric prefixes
// (e.g. "10263") are leftover line numbers, and the actual swap statements
// and braces between the surviving comment lines are missing.  Restore the
// full function body from the canonical source before building.
10263 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10269 if ( format == RTAUDIO_SINT16 ) {
10270 for ( unsigned int i=0; i<samples; i++ ) {
10271 // Swap 1st and 2nd bytes.
10276 // Increment 2 bytes.
10280 else if ( format == RTAUDIO_SINT32 ||
10281 format == RTAUDIO_FLOAT32 ) {
10282 for ( unsigned int i=0; i<samples; i++ ) {
10283 // Swap 1st and 4th bytes.
10288 // Swap 2nd and 3rd bytes.
10294 // Increment 3 more bytes.
10298 else if ( format == RTAUDIO_SINT24 ) {
10299 for ( unsigned int i=0; i<samples; i++ ) {
10300 // Swap 1st and 3rd bytes.
10305 // Increment 2 more bytes.
10309 else if ( format == RTAUDIO_FLOAT64 ) {
10310 for ( unsigned int i=0; i<samples; i++ ) {
10311 // Swap 1st and 8th bytes
10316 // Swap 2nd and 7th bytes
10322 // Swap 3rd and 6th bytes
10328 // Swap 4th and 5th bytes
10334 // Increment 5 more bytes.
10340 // Indentation settings for Vim and Emacs
10342 // Local Variables:
10343 // c-basic-offset: 2
10344 // indent-tabs-mode: nil
10347 // vim: et sts=2 sw=2