1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
// Static variable definitions.
// Number of entries in the SAMPLE_RATES table below; keep the two in sync.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
// Candidate rates (Hz) tried when a device reports a continuous min/max
// range instead of a discrete list of supported sample rates.
const unsigned int RtApi::SAMPLE_RATES[] = {
4000, 5512, 8000, 9600, 11025, 16000, 22050,
32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction, Windows flavor: when any Windows audio API
// is compiled in, the MUTEX_* macros map onto Win32 critical sections.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a narrow (char) C string to a std::string.
// Returns an empty string for a NULL pointer instead of invoking
// undefined behavior in the std::string(const char*) constructor.
static std::string convertCharPointerToStdString(const char *text)
{
  if ( !text ) return std::string();
  return std::string(text);
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
// Platform mutex abstraction, POSIX flavor: map MUTEX_* onto pthreads.
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// No supported audio API compiled in: the mutex macros become no-ops.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Append every audio API compiled into this build of the library to 'apis'.
// The order here will control the order of RtAudio's API search in
// the constructor: earlier entries are preferred when no API is specified.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
#if defined(__UNIX_JACK__)
apis.push_back( UNIX_JACK );
#if defined(__LINUX_PULSE__)
apis.push_back( LINUX_PULSE );
#if defined(__LINUX_ALSA__)
apis.push_back( LINUX_ALSA );
#if defined(__LINUX_OSS__)
apis.push_back( LINUX_OSS );
#if defined(__WINDOWS_ASIO__)
apis.push_back( WINDOWS_ASIO );
#if defined(__WINDOWS_WASAPI__)
apis.push_back( WINDOWS_WASAPI );
#if defined(__WINDOWS_DS__)
apis.push_back( WINDOWS_DS );
#if defined(__MACOSX_CORE__)
apis.push_back( MACOSX_CORE );
#if defined(__RTAUDIO_DUMMY__)
apis.push_back( RTAUDIO_DUMMY );
// Return the short lower-case identifier ("jack", "alsa", ...) for a
// compiled-in API.  Each branch keeps its name in a function-local static
// so a reference can be returned safely; APIs not compiled into this
// build fall through to a shared static empty string.
const std::string &RtAudio :: getCompiledApiName( RtAudio::Api api )
#if defined(__UNIX_JACK__)
if ( api == UNIX_JACK ) {
static std::string name( "jack" );
#if defined(__LINUX_PULSE__)
if ( api == LINUX_PULSE ) {
static std::string name( "pulse" );
#if defined(__LINUX_ALSA__)
if ( api == LINUX_ALSA ) {
static std::string name( "alsa" );
#if defined(__LINUX_OSS__)
if ( api == LINUX_OSS ) {
static std::string name( "oss" );
#if defined(__WINDOWS_ASIO__)
if ( api == WINDOWS_ASIO ) {
static std::string name( "asio" );
#if defined(__WINDOWS_WASAPI__)
if ( api == WINDOWS_WASAPI ) {
static std::string name( "wasapi" );
#if defined(__WINDOWS_DS__)
if ( api == WINDOWS_DS ) {
static std::string name( "ds" );
#if defined(__MACOSX_CORE__)
if ( api == MACOSX_CORE ) {
static std::string name( "core" );
#if defined(__RTAUDIO_DUMMY__)
if ( api == RTAUDIO_DUMMY ) {
static std::string name( "dummy" );
// Fallback for APIs not compiled into this build.
static std::string name;
// Return the human-readable display name ("JACK", "Core Audio", ...) for
// a compiled-in API; parallels getCompiledApiName above.  APIs not
// compiled into this build fall through to a shared static empty string.
const std::string &RtAudio :: getCompiledApiDisplayName( RtAudio::Api api )
#if defined(__UNIX_JACK__)
if ( api == UNIX_JACK ) {
static std::string name( "JACK" );
#if defined(__LINUX_PULSE__)
if ( api == LINUX_PULSE ) {
static std::string name( "PulseAudio" );
#if defined(__LINUX_ALSA__)
if ( api == LINUX_ALSA ) {
static std::string name( "ALSA" );
#if defined(__LINUX_OSS__)
if ( api == LINUX_OSS ) {
static std::string name( "OSS" );
#if defined(__WINDOWS_ASIO__)
if ( api == WINDOWS_ASIO ) {
static std::string name( "ASIO" );
#if defined(__WINDOWS_WASAPI__)
if ( api == WINDOWS_WASAPI ) {
static std::string name( "WASAPI" );
#if defined(__WINDOWS_DS__)
if ( api == WINDOWS_DS ) {
static std::string name( "DirectSound" );
#if defined(__MACOSX_CORE__)
if ( api == MACOSX_CORE ) {
static std::string name( "Core Audio" );
#if defined(__RTAUDIO_DUMMY__)
if ( api == RTAUDIO_DUMMY ) {
static std::string name( "RtAudio Dummy" );
// Fallback for APIs not compiled into this build.
static std::string name;
256 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
258 unsigned int api_number = RtAudio::UNSPECIFIED;
259 size_t nameLength = name.size();
261 if ( nameLength == 0 )
262 return RtAudio::UNSPECIFIED;
264 while ( api_number <= RtAudio::RTAUDIO_DUMMY ) {
265 const std::string &otherName =
266 getCompiledApiName((RtAudio::Api)api_number);
268 if ( name == otherName )
269 return (RtAudio::Api)api_number;
274 return RtAudio::UNSPECIFIED;
// Instantiate the RtApi subclass implementing 'api' and point rtapi_ at
// it.  Only backends compiled into this build are candidates; requesting
// an unavailable backend leaves rtapi_ unchanged.
void RtAudio :: openRtApi( RtAudio::Api api )
#if defined(__UNIX_JACK__)
if ( api == UNIX_JACK )
rtapi_ = new RtApiJack();
#if defined(__LINUX_ALSA__)
if ( api == LINUX_ALSA )
rtapi_ = new RtApiAlsa();
#if defined(__LINUX_PULSE__)
if ( api == LINUX_PULSE )
rtapi_ = new RtApiPulse();
#if defined(__LINUX_OSS__)
if ( api == LINUX_OSS )
rtapi_ = new RtApiOss();
#if defined(__WINDOWS_ASIO__)
if ( api == WINDOWS_ASIO )
rtapi_ = new RtApiAsio();
#if defined(__WINDOWS_WASAPI__)
if ( api == WINDOWS_WASAPI )
rtapi_ = new RtApiWasapi();
#if defined(__WINDOWS_DS__)
if ( api == WINDOWS_DS )
rtapi_ = new RtApiDs();
#if defined(__MACOSX_CORE__)
if ( api == MACOSX_CORE )
rtapi_ = new RtApiCore();
#if defined(__RTAUDIO_DUMMY__)
if ( api == RTAUDIO_DUMMY )
rtapi_ = new RtApiDummy();
// Construct an RtAudio instance.  If 'api' names a compiled-in backend
// it is opened directly; otherwise the compiled APIs are searched for
// the first one reporting at least one device.
RtAudio :: RtAudio( RtAudio::Api api )
if ( api != UNSPECIFIED ) {
// Attempt to open the specified API.
if ( rtapi_ ) return;

// No compiled support for specified API value. Issue a debug
// warning and continue as if no API was specified.
std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;

// Iterate through the compiled APIs and return as soon as we find
// one with at least one device or we reach the end of the list.
std::vector< RtAudio::Api > apis;
getCompiledApi( apis );
for ( unsigned int i=0; i<apis.size(); i++ ) {
openRtApi( apis[i] );
if ( rtapi_ && rtapi_->getDeviceCount() ) break;

if ( rtapi_ ) return;

// It should not be possible to get here because the preprocessor
// definition __RTAUDIO_DUMMY__ is automatically defined if no
// API-specific definitions are passed to the compiler. But just in
// case something weird happens, we'll throw an error.
std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor: releases the owned RtApi backend instance.
RtAudio :: ~RtAudio()
// Thin forwarding wrapper: delegate stream creation to the active
// backend (rtapi_), passing all parameters through unchanged.
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
                            RtAudio::StreamParameters *inputParameters,
                            RtAudioFormat format, unsigned int sampleRate,
                            unsigned int *bufferFrames,
                            RtAudioCallback callback, void *userData,
                            RtAudio::StreamOptions *options,
                            RtAudioErrorCallback errorCallback )
return rtapi_->openStream( outputParameters, inputParameters, format,
                           sampleRate, bufferFrames, callback,
                           userData, options, errorCallback );
373 // *************************************************** //
375 // Public RtApi definitions (see end of file for
376 // private or protected utility functions).
378 // *************************************************** //
// RtApi constructor body: put the stream bookkeeping into a known
// "closed" state and create the stream mutex.
stream_.state = STREAM_CLOSED;
stream_.mode = UNINITIALIZED;
stream_.apiHandle = 0;
stream_.userBuffer[0] = 0;
stream_.userBuffer[1] = 0;
MUTEX_INITIALIZE( &stream_.mutex );
showWarnings_ = true; // print warning-level messages by default
firstErrorOccurred_ = false;

// RtApi destructor body: release the stream mutex.
MUTEX_DESTROY( &stream_.mutex );
// Validate all user-supplied stream parameters, then delegate the
// API-specific work to probeDeviceOpen() for each requested direction.
// Argument errors are reported via error( INVALID_USE ); probe failures
// via error( SYSTEM_ERROR ).
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
// Only one stream may be open per RtApi instance.
if ( stream_.state != STREAM_CLOSED ) {
errorText_ = "RtApi::openStream: a stream is already open!";
error( RtAudioError::INVALID_USE );

// Clear stream information potentially left from a previously open stream.

if ( oParams && oParams->nChannels < 1 ) {
errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
error( RtAudioError::INVALID_USE );

if ( iParams && iParams->nChannels < 1 ) {
errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
error( RtAudioError::INVALID_USE );

if ( oParams == NULL && iParams == NULL ) {
errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
error( RtAudioError::INVALID_USE );

// An unknown sample format has size zero.
if ( formatBytes(format) == 0 ) {
errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
error( RtAudioError::INVALID_USE );

// Check that the requested device indices are in range.
unsigned int nDevices = getDeviceCount();
unsigned int oChannels = 0;
oChannels = oParams->nChannels;
if ( oParams->deviceId >= nDevices ) {
errorText_ = "RtApi::openStream: output device parameter value is invalid.";
error( RtAudioError::INVALID_USE );

unsigned int iChannels = 0;
iChannels = iParams->nChannels;
if ( iParams->deviceId >= nDevices ) {
errorText_ = "RtApi::openStream: input device parameter value is invalid.";
error( RtAudioError::INVALID_USE );

// Probe/open the playback side first.
if ( oChannels > 0 ) {
result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                          sampleRate, format, bufferFrames, options );
if ( result == false ) {
error( RtAudioError::SYSTEM_ERROR );

// Then the capture side; undo the playback half on failure.
if ( iChannels > 0 ) {
result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                          sampleRate, format, bufferFrames, options );
if ( result == false ) {
if ( oChannels > 0 ) closeStream();
error( RtAudioError::SYSTEM_ERROR );

// Stash the user callback/context for the API-specific callback thread.
stream_.callbackInfo.callback = (void *) callback;
stream_.callbackInfo.userData = userData;
stream_.callbackInfo.errorCallback = (void *) errorCallback;

// Report the actual buffer count back to the caller, if requested.
if ( options ) options->numberOfBuffers = stream_.nBuffers;
stream_.state = STREAM_STOPPED;
// Base-class fallback for the default input device index.
unsigned int RtApi :: getDefaultInputDevice( void )
// Should be implemented in subclasses if possible.
// Base-class fallback for the default output device index.
unsigned int RtApi :: getDefaultOutputDevice( void )
// Should be implemented in subclasses if possible.
// Base-class placeholder for closing a stream.
void RtApi :: closeStream( void )
// MUST be implemented in subclasses!
// Base-class placeholder for device probing/opening; each API-specific
// subclass overrides this to do the real device setup work.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
// MUST be implemented in subclasses!
void RtApi :: tickStreamTime( void )
// Subclasses that do not provide their own implementation of
// getStreamTime should call this function once per buffer I/O to
// provide basic stream time support.

// Advance the running stream time by one buffer's duration (seconds).
stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

// Remember when this tick happened so getStreamTime() can interpolate.
#if defined( HAVE_GETTIMEOFDAY )
gettimeofday( &stream_.lastTickTimestamp, NULL );
// Sum the reported latency (frames) for the active stream direction(s):
// playback latency lives in latency[0], capture in latency[1]; DUPLEX
// streams accumulate both.
long RtApi :: getStreamLatency( void )
long totalLatency = 0;
if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
totalLatency = stream_.latency[0];
if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
totalLatency += stream_.latency[1];
double RtApi :: getStreamTime( void )
#if defined( HAVE_GETTIMEOFDAY )
// Return a very accurate estimate of the stream time by
// adding in the elapsed time since the last tick.
// If the stream is not running (or no buffer has been processed yet),
// there is nothing to interpolate from.
if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
return stream_.streamTime;

gettimeofday( &now, NULL );
then = stream_.lastTickTimestamp;
// Interpolate: add the wall-clock seconds elapsed since the last tick.
return stream_.streamTime +
((now.tv_sec + 0.000001 * now.tv_usec) -
(then.tv_sec + 0.000001 * then.tv_usec));
// Without gettimeofday support, report the last tick time directly.
return stream_.streamTime;
// Set the running stream time to 'time' (seconds) and restamp the tick
// clock so subsequent interpolation in getStreamTime() stays coherent.
void RtApi :: setStreamTime( double time )
stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
gettimeofday( &stream_.lastTickTimestamp, NULL );
// Return the sample rate of the currently open stream.
unsigned int RtApi :: getStreamSampleRate( void )
return stream_.sampleRate;
585 // *************************************************** //
587 // OS/API-specific methods.
589 // *************************************************** //
591 #if defined(__MACOSX_CORE__)
593 // The OS X CoreAudio API is designed to use a separate callback
594 // procedure for each of its audio devices. A single RtAudio duplex
595 // stream using two different devices is supported here, though it
596 // cannot be guaranteed to always behave correctly because we cannot
597 // synchronize these two callbacks.
599 // A property listener is installed for over/underrun information.
600 // However, no functionality is currently provided to allow property
601 // listeners to trigger user handlers because it is unclear what could
602 // be done if a critical stream parameter (buffer size, sample rate,
603 // device disconnect) notification arrived. The listeners entail
604 // quite a bit of extra code and most likely, a user program wouldn't
605 // be prepared for the result anyway. However, we do provide a flag
606 // to the client callback function to inform of an over/underrun.
// A structure to hold various information related to the CoreAudio API
// implementation (one instance per open stream; index 0 = playback
// side, index 1 = capture side where members are two-element arrays).
AudioDeviceID id[2]; // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
AudioDeviceIOProcID procId[2];
UInt32 iStream[2]; // device stream index (or first if using multiple)
UInt32 nStreams[2]; // number of streams to use
pthread_cond_t condition;
int drainCounter; // Tracks callback counts when draining
bool internalDrain; // Indicates if stop is initiated from callback or not.
// Constructor initializer list: no device buffer, no drain in progress,
// one stream per direction, null device ids and clear xrun flags.
:deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// RtApiCore constructor: register the system run loop so CoreAudio
// property queries and notifications are serviced on OS X 10.6+.
RtApiCore:: RtApiCore()
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
// This is a largely undocumented but absolutely necessary
// requirement starting with OS-X 10.6. If not called, queries and
// updates to various audio device properties are not handled
// correctly.
CFRunLoopRef theRunLoop = NULL;
AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                        kAudioObjectPropertyScopeGlobal,
                                        kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
if ( result != noErr ) {
// Non-fatal: report as a warning and continue construction.
errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
error( RtAudioError::WARNING );
// Destructor: shut down any still-open stream before teardown proceeds.
RtApiCore :: ~RtApiCore()
// The subclass destructor gets called before the base class
// destructor, so close an existing stream before deallocating
// apiDeviceId memory.
if ( stream_.state != STREAM_CLOSED ) closeStream();
unsigned int RtApiCore :: getDeviceCount( void )
// Find out how many audio devices there are, if any.
// Query the byte size of the kAudioHardwarePropertyDevices array and
// divide by the element size to obtain the device count.
AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
error( RtAudioError::WARNING );
return dataSize / sizeof( AudioDeviceID );
// Map the system default input AudioDeviceID to RtAudio's device index.
unsigned int RtApiCore :: getDefaultInputDevice( void )
unsigned int nDevices = getDeviceCount();
if ( nDevices <= 1 ) return 0; // zero or one device: index 0 either way

// Fetch the default input device id.
UInt32 dataSize = sizeof( AudioDeviceID );
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
error( RtAudioError::WARNING );

// Fetch the full device list and search it for the default id.
dataSize *= nDevices;
AudioDeviceID deviceList[ nDevices ];
property.mSelector = kAudioHardwarePropertyDevices;
result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
error( RtAudioError::WARNING );

// Return the index whose id matches the default device.
for ( unsigned int i=0; i<nDevices; i++ )
if ( id == deviceList[i] ) return i;

errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
error( RtAudioError::WARNING );
// Map the system default output AudioDeviceID to RtAudio's device index.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
unsigned int nDevices = getDeviceCount();
if ( nDevices <= 1 ) return 0; // zero or one device: index 0 either way

// Fetch the default output device id.
UInt32 dataSize = sizeof( AudioDeviceID );
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
error( RtAudioError::WARNING );

// Fetch the full device list and search it for the default id.
dataSize = sizeof( AudioDeviceID ) * nDevices;
AudioDeviceID deviceList[ nDevices ];
property.mSelector = kAudioHardwarePropertyDevices;
result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
error( RtAudioError::WARNING );

// Return the index whose id matches the default device.
for ( unsigned int i=0; i<nDevices; i++ )
if ( id == deviceList[i] ) return i;

errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
error( RtAudioError::WARNING );
// Build a DeviceInfo record (name, channel counts, supported sample
// rates, native format, default-device flags) for CoreAudio device
// index 'device'.  Failures are reported through error( WARNING ).
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
RtAudio::DeviceInfo info;

// Validate the device index against the current device count.
unsigned int nDevices = getDeviceCount();
if ( nDevices == 0 ) {
errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
error( RtAudioError::INVALID_USE );

if ( device >= nDevices ) {
errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
error( RtAudioError::INVALID_USE );

// Translate the RtAudio index into a CoreAudio AudioDeviceID.
AudioDeviceID deviceList[ nDevices ];
UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                        kAudioObjectPropertyScopeGlobal,
                                        kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                              0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
error( RtAudioError::WARNING );

AudioDeviceID id = deviceList[ device ];

// Get the device name.
dataSize = sizeof( CFStringRef );
property.mSelector = kAudioObjectPropertyManufacturer;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
if ( result != noErr ) {
errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

// Convert the CFString manufacturer name to a C string; worst case is
// three bytes per character plus the terminating NUL.
//const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
int length = CFStringGetLength(cfname);
char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append( (const char *)mname, strlen(mname) );
info.name.append( ": " );

// Append the device name after the manufacturer prefix.
property.mSelector = kAudioObjectPropertyName;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
if ( result != noErr ) {
errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

//const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
length = CFStringGetLength(cfname);
char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append( (const char *)name, strlen(name) );

// Get the output stream "configuration".
AudioBufferList *bufferList = nil;
property.mSelector = kAudioDevicePropertyStreamConfiguration;
property.mScope = kAudioDevicePropertyScopeOutput;
// property.mElement = kAudioObjectPropertyElementWildcard;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc( dataSize );
if ( bufferList == NULL ) {
errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
error( RtAudioError::WARNING );

result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

// Get output channel information: total channels across all streams.
unsigned int i, nStreams = bufferList->mNumberBuffers;
for ( i=0; i<nStreams; i++ )
info.outputChannels += bufferList->mBuffers[i].mNumberChannels;

// Get the input stream "configuration" (same queries, input scope).
property.mScope = kAudioDevicePropertyScopeInput;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc( dataSize );
if ( bufferList == NULL ) {
errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
error( RtAudioError::WARNING );

result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
if (result != noErr || dataSize == 0) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

// Get input channel information.
nStreams = bufferList->mNumberBuffers;
for ( i=0; i<nStreams; i++ )
info.inputChannels += bufferList->mBuffers[i].mNumberChannels;

// If device opens for both playback and capture, we determine the channels.
if ( info.outputChannels > 0 && info.inputChannels > 0 )
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

// Probe the device sample rates.
bool isInput = false;
if ( info.outputChannels == 0 ) isInput = true;

// Determine the supported sample rates.
property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != kAudioHardwareNoError || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

UInt32 nRanges = dataSize / sizeof( AudioValueRange );
AudioValueRange rangeList[ nRanges ];
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
if ( result != kAudioHardwareNoError ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

// The sample rate reporting mechanism is a bit of a mystery. It
// seems that it can either return individual rates or a range of
// rates. I assume that if the min / max range values are the same,
// then that represents a single supported rate and if the min / max
// range values are different, the device supports an arbitrary
// range of values (though there might be multiple ranges, so we'll
// use the most conservative range).
Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
bool haveValueRange = false;
info.sampleRates.clear();
for ( UInt32 i=0; i<nRanges; i++ ) {
if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
// Discrete rate: record it directly.
unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
info.sampleRates.push_back( tmpSr );

// Prefer the highest discrete rate that does not exceed 48 kHz.
if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
info.preferredSampleRate = tmpSr;

// True range: narrow to the intersection of all reported ranges.
haveValueRange = true;
if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;

// For a ranged report, admit the standard rates that fall inside it.
if ( haveValueRange ) {
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
info.sampleRates.push_back( SAMPLE_RATES[k] );

if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
info.preferredSampleRate = SAMPLE_RATES[k];

// Sort and remove any redundant values
std::sort( info.sampleRates.begin(), info.sampleRates.end() );
info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

if ( info.sampleRates.size() == 0 ) {
errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );

// CoreAudio always uses 32-bit floating point data for PCM streams.
// Thus, any other "physical" formats supported by the device are of
// no interest to the client.
info.nativeFormats = RTAUDIO_FLOAT32;

// Flag whether this device is the system default for either direction.
if ( info.outputChannels > 0 )
if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
if ( info.inputChannels > 0 )
if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: recover the RtApiCore instance from the
// CallbackInfo user pointer and forward the device I/O buffers to its
// callbackEvent() method.  A false return from callbackEvent maps to a
// CoreAudio error so the HAL stops calling us.
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
CallbackInfo *info = (CallbackInfo *) infoPointer;

RtApiCore *object = (RtApiCore *) info->object;
if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
return kAudioHardwareUnspecifiedError;
return kAudioHardwareNoError;
// Property listener for over/underrun notifications: scan the reported
// properties for kAudioDeviceProcessorOverload and set the matching
// direction's xrun flag in the CoreHandle (index 0 = playback,
// index 1 = capture).
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
CoreHandle *handle = (CoreHandle *) handlePointer;
for ( UInt32 i=0; i<nAddresses; i++ ) {
if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
handle->xrun[1] = true;
handle->xrun[0] = true;
return kAudioHardwareNoError;
// Property listener that reads back the device's current nominal sample
// rate and stores it through 'ratePointer' (a Float64*); used to poll
// for completion of a requested sample rate change.
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
Float64 *rate = (Float64 *) ratePointer;
UInt32 dataSize = sizeof( Float64 );
AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                        kAudioObjectPropertyScopeGlobal,
                                        kAudioObjectPropertyElementMaster };
AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
return kAudioHardwareNoError;
//-----------------------------------------------------------------------
// Probe the requested CoreAudio device and open it for one stream
// direction (mode).  Locates the device's streams/channels, negotiates
// buffer size, sample rate, virtual and physical formats, allocates the
// CoreHandle and internal buffers, and registers the i/o proc and xrun
// listener.  On failure an error message is recorded and partially
// initialized state is released on the cleanup path at the bottom.
bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";

  // Translate the RtAudio device index into a CoreAudio AudioDeviceID.
  // NOTE(review): a variable-length array is a gcc/clang extension, not
  // standard C++ — consider std::vector if portability matters.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";

  AudioDeviceID id = deviceList[ device ];

  // Setup for stream mode.
  bool isInput = false;
  if ( mode == INPUT ) {
    property.mScope = kAudioDevicePropertyScopeInput;
    property.mScope = kAudioDevicePropertyScopeOutput;  // (output/duplex branch)

  // Get the stream "configuration".
  AudioBufferList	*bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Search for one or more streams that contain the desired number of
  // channels. CoreAudio devices can have an arbitrary number of
  // streams and each stream can have an arbitrary number of channels.
  // For each stream, a single buffer of interleaved samples is
  // provided. RtAudio prefers the use of one stream of interleaved
  // data or multiple consecutive single-channel streams. However, we
  // now support multiple consecutive multi-channel streams of
  // interleaved data as well.
  UInt32 iStream, offsetCounter = firstChannel;
  UInt32 nStreams = bufferList->mNumberBuffers;
  bool monoMode = false;
  bool foundStream = false;

  // First check that the device supports the requested number of
  // channels (summed over all of its streams).
  UInt32 deviceChannels = 0;
  for ( iStream=0; iStream<nStreams; iStream++ )
    deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

  if ( deviceChannels < ( channels + firstChannel ) ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
    errorText_ = errorStream_.str();

  // Look for a single stream meeting our needs.
  UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
  for ( iStream=0; iStream<nStreams; iStream++ ) {
    streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
    if ( streamChannels >= channels + offsetCounter ) {
      firstStream = iStream;
      channelOffset = offsetCounter;
    // Skip whole streams that lie entirely before the requested offset.
    if ( streamChannels > offsetCounter ) break;
    offsetCounter -= streamChannels;

  // If we didn't find a single stream above, then we should be able
  // to meet the channel specification with multiple streams.
  if ( foundStream == false ) {
    offsetCounter = firstChannel;
    for ( iStream=0; iStream<nStreams; iStream++ ) {
      streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
      if ( streamChannels > offsetCounter ) break;
      offsetCounter -= streamChannels;

    firstStream = iStream;
    channelOffset = offsetCounter;
    Int32 channelCounter = channels + offsetCounter - streamChannels;

    // monoMode tracks whether every involved stream is single-channel.
    if ( streamChannels > 1 ) monoMode = false;
    while ( channelCounter > 0 ) {
      streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
      if ( streamChannels > 1 ) monoMode = false;
      channelCounter -= streamChannels;

  // Determine the buffer size.
  AudioValueRange	bufferRange;
  dataSize = sizeof( AudioValueRange );
  property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Clamp the requested size to the device range; RTAUDIO_MINIMIZE_LATENCY
  // forces the device minimum.
  if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
  else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;

  // Set the buffer size. For multiple streams, I'm assuming we only
  // need to make this setting for the master channel.
  UInt32 theSize = (UInt32) *bufferSize;
  dataSize = sizeof( UInt32 );
  property.mSelector = kAudioDevicePropertyBufferFrameSize;
  result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
    errorText_ = errorStream_.str();

  // If attempting to setup a duplex stream, the bufferSize parameter
  // MUST be the same in both directions!
  *bufferSize = theSize;
  if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
    errorText_ = errorStream_.str();

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;

  // Try to set "hog" mode ... it's not clear to me this is working.
  if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
    dataSize = sizeof( hog_pid );
    property.mSelector = kAudioDevicePropertyHogMode;
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
      errorText_ = errorStream_.str();

    // Only grab the device if another process currently owns it.
    if ( hog_pid != getpid() ) {
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
      if ( result != noErr ) {
        errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
        errorText_ = errorStream_.str();

  // Check and if necessary, change the sample rate for the device.
  Float64 nominalRate;
  dataSize = sizeof( Float64 );
  property.mSelector = kAudioDevicePropertyNominalSampleRate;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
    errorText_ = errorStream_.str();

  // Only change the sample rate if off by more than 1 Hz.
  if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
    // Set a property listener for the sample rate change
    Float64 reportedRate = 0.0;
    AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
    result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
      errorText_ = errorStream_.str();

    nominalRate = (Float64) sampleRate;
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
    if ( result != noErr ) {
      // Roll back the listener before reporting the failure.
      AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
      errorText_ = errorStream_.str();

    // Now wait until the reported nominal rate is what we just set.
    // rateListener updates reportedRate asynchronously; poll in 5 ms
    // increments with a 5 second timeout.
    UInt32 microCounter = 0;
    while ( reportedRate != nominalRate ) {
      microCounter += 5000;
      if ( microCounter > 5000000 ) break;

    // Remove the property listener.
    AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );

    if ( microCounter > 5000000 ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
      errorText_ = errorStream_.str();

  // Now set the stream format for all streams. Also, check the
  // physical format of the device and change that if necessary.
  AudioStreamBasicDescription	description;
  dataSize = sizeof( AudioStreamBasicDescription );
  property.mSelector = kAudioStreamPropertyVirtualFormat;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Set the sample rate and data format id. However, only make the
  // change if the sample rate is not within 1.0 of the desired
  // rate and the format is not linear pcm.
  bool updateFormat = false;
  if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
    description.mSampleRate = (Float64) sampleRate;
    updateFormat = true;

  if ( description.mFormatID != kAudioFormatLinearPCM ) {
    description.mFormatID = kAudioFormatLinearPCM;
    updateFormat = true;

  if ( updateFormat ) {
    result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
      errorText_ = errorStream_.str();

  // Now check the physical format.
  property.mSelector = kAudioStreamPropertyPhysicalFormat;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL,  &dataSize, &description );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
    errorText_ = errorStream_.str();

  //std::cout << "Current physical stream format:" << std::endl;
  //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
  //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
  //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
  //std::cout << "   sample rate = " << description.mSampleRate << std::endl;

  if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
    description.mFormatID = kAudioFormatLinearPCM;
    //description.mSampleRate = (Float64) sampleRate;
    AudioStreamBasicDescription	testDescription = description;

    // We'll try higher bit rates first and then work our way down.
    // NOTE(review): the fractional keys 24.2/24.4 below only stay
    // distinct if the pair's first member is Float32; this vector is
    // declared pair<UInt32, UInt32>, which truncates them — upstream
    // RtAudio declares std::vector< std::pair<Float32, UInt32> >.
    // Confirm against the canonical source.
    std::vector< std::pair<UInt32, UInt32>  > physicalFormats;
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
    formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
    formatFlags |= kAudioFormatFlagIsAlignedHigh;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
    formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
    physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
    physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );

    bool setPhysicalFormat = false;
    for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
      testDescription = description;
      testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
      testDescription.mFormatFlags = physicalFormats[i].second;
      // NOTE(review): '~' here is bitwise NOT, so this sub-expression is
      // effectively always true; '!' appears to have been intended to
      // detect the unpacked 24-bit-in-4-bytes case — confirm upstream.
      if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
        testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
        testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
      testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
      result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
      if ( result == noErr ) {
        setPhysicalFormat = true;
        //std::cout << "Updated physical stream format:" << std::endl;
        //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
        //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
        //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
        //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;

    if ( !setPhysicalFormat ) {
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
      errorText_ = errorStream_.str();

  } // done setting virtual/physical formats.

  // Get the stream / device latency.
  dataSize = sizeof( UInt32 );
  property.mSelector = kAudioDevicePropertyLatency;
  if ( AudioObjectHasProperty( id, &property ) == true ) {
    result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
    if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
      // Latency is informational only; a failure here is just a warning.
      errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
      errorText_ = errorStream_.str();
      error( RtAudioError::WARNING );

  // Byte-swapping: According to AudioHardware.h, the stream data will
  // always be presented in native-endian format, so we should never
  // need to byte swap.
  stream_.doByteSwap[mode] = false;

  // From the CoreAudio documentation, PCM data must be supplied as
  // 32-bit floats on the device side, hence RTAUDIO_FLOAT32 below.
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

  if ( streamCount == 1 )
    stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
  else // multiple streams
    stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;
  if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( streamCount == 1 ) {
    if ( stream_.nUserChannels[mode] > 1 &&
         stream_.userInterleaved != stream_.deviceInterleaved[mode] )
      stream_.doConvertBuffer[mode] = true;

  else if ( monoMode && stream_.userInterleaved )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our CoreHandle structure for the stream.
  CoreHandle *handle = 0;
  if ( stream_.apiHandle == 0 ) {
      handle = new CoreHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";

    if ( pthread_cond_init( &handle->condition, NULL ) ) {
      errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";

    stream_.apiHandle = (void *) handle;

    // Reuse the handle created when the first direction was opened.
    handle = (CoreHandle *) stream_.apiHandle;
  handle->iStream[mode] = firstStream;
  handle->nStreams[mode] = streamCount;
  handle->id[mode] = id;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
  // NOTE(review): memset executes before the NULL check below — a failed
  // malloc would be dereferenced here.  Swap the two, or use calloc.
  memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";

  // If possible, we will make use of the CoreAudio stream buffers as
  // "device buffers".  However, we can't do this if using multiple
  // streams.
  if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    if ( mode == INPUT ) {
      // For duplex, reuse the output-side device buffer if it is large enough.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
        if ( bufferBytes <= bytesOut ) makeBuffer = false;

      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";

  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  // Setup the buffer conversion information structure.
  if ( stream_.doConvertBuffer[mode] ) {
    if ( streamCount > 1 ) setConvertInfo( mode, 0 );
    else setConvertInfo( mode, channelOffset );

  if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
    // Only one callback procedure per device.
    stream_.mode = DUPLEX;

#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
    // deprecated in favor of AudioDeviceCreateIOProcID()
    result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
    errorText_ = errorStream_.str();

  if ( stream_.mode == OUTPUT && mode == INPUT )
    stream_.mode = DUPLEX;
    stream_.mode = mode;  // (single-direction branch)

  // Setup the device property listener for over/underload.
  property.mSelector = kAudioDeviceProcessorOverload;
  property.mScope = kAudioObjectPropertyScopeGlobal;
  result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );

  // Failure/cleanup path: release everything allocated above so the
  // stream object returns to a closed, reusable state.
    pthread_cond_destroy( &handle->condition );
    stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.state = STREAM_CLOSED;
//-----------------------------------------------------------------------
// Close an open stream: remove the xrun property listeners, stop the
// device(s) if still running, destroy the i/o procs, free the internal
// user/device buffers and the CoreHandle, and mark the stream closed.
void RtApiCore :: closeStream( void )
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiCore::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  // Output (or duplex) side teardown.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                            kAudioObjectPropertyScopeGlobal,
                                            kAudioObjectPropertyElementMaster };
    property.mSelector = kAudioDeviceProcessorOverload;
    property.mScope = kAudioObjectPropertyScopeGlobal;
    if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
      errorText_ = "RtApiCore::closeStream(): error removing property listener!";
      error( RtAudioError::WARNING );

    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[0], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );

  // Input side teardown — skipped for duplex on a single device, since
  // only one callback/listener pair was registered for both directions.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                            kAudioObjectPropertyScopeGlobal,
                                            kAudioObjectPropertyElementMaster };
    property.mSelector = kAudioDeviceProcessorOverload;
    property.mScope = kAudioObjectPropertyScopeGlobal;
    if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
      errorText_ = "RtApiCore::closeStream(): error removing property listener!";
      error( RtAudioError::WARNING );

    if ( stream_.state == STREAM_RUNNING )
      AudioDeviceStop( handle->id[1], callbackHandler );
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
    AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
    // deprecated in favor of AudioDeviceDestroyIOProcID()
    AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );

  // Release the per-direction user buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  // Release the shared device buffer, if one was allocated.
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  // Destroy pthread condition variable.
  pthread_cond_destroy( &handle->condition );
  stream_.apiHandle = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
//-----------------------------------------------------------------------
// Start an opened stream: AudioDeviceStart() on the output and/or input
// device, reset the drain bookkeeping, and mark the stream running.
void RtApiCore :: startStream( void )
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiCore::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    result = AudioDeviceStart( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  // A duplex stream spread over two devices needs the input device
  // started separately; single-device duplex shares one i/o proc.
  if ( stream_.mode == INPUT ||
       ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    result = AudioDeviceStart( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();

  // Clear drain state so the callback feeds audio normally.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
//-----------------------------------------------------------------------
// Stop a running stream.  For the output/duplex side the callback is
// first asked to drain pending output (drainCounter / condition
// handshake), then AudioDeviceStop() is called on each started device.
void RtApiCore :: stopStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  OSStatus result = noErr;
  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    // If a drain hasn't already been requested (e.g. by abortStream or
    // the user callback), request one and wait for callbackEvent() to
    // signal that the output has been zero-filled.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

    result = AudioDeviceStop( handle->id[0], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
      errorText_ = errorStream_.str();

  // Stop the input device too when it is distinct from the output device.
  if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
    result = AudioDeviceStop( handle->id[1], callbackHandler );
    if ( result != noErr ) {
      errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
      errorText_ = errorStream_.str();

  stream_.state = STREAM_STOPPED;

  if ( result == noErr ) return;
  error( RtAudioError::SYSTEM_ERROR );
//-----------------------------------------------------------------------
// Abort a running stream: set drainCounter to 2 so the callback stops
// feeding fresh audio immediately (zero-fill) rather than draining
// pending output gracefully.
void RtApiCore :: abortStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
  handle->drainCounter = 2;
1719 // This function will be called by a spawned thread when the user
1720 // callback function signals that the stream should be stopped or
1721 // aborted. It is better to handle it this way because the
1722 // callbackEvent() function probably should return before the AudioDeviceStop()
1723 // function is called.
1724 static void *coreStopStream( void *ptr )
1726 CallbackInfo *info = (CallbackInfo *) ptr;
1727 RtApiCore *object = (RtApiCore *) info->object;
1729 object->stopStream();
1730 pthread_exit( NULL );
1733 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1734 const AudioBufferList *inBufferList,
1735 const AudioBufferList *outBufferList )
1737 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1738 if ( stream_.state == STREAM_CLOSED ) {
1739 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1740 error( RtAudioError::WARNING );
1744 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1745 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1747 // Check if we were draining the stream and signal is finished.
1748 if ( handle->drainCounter > 3 ) {
1749 ThreadHandle threadId;
1751 stream_.state = STREAM_STOPPING;
1752 if ( handle->internalDrain == true )
1753 pthread_create( &threadId, NULL, coreStopStream, info );
1754 else // external call to stopStream()
1755 pthread_cond_signal( &handle->condition );
1759 AudioDeviceID outputDevice = handle->id[0];
1761 // Invoke user callback to get fresh output data UNLESS we are
1762 // draining stream or duplex mode AND the input/output devices are
1763 // different AND this function is called for the input device.
1764 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1765 RtAudioCallback callback = (RtAudioCallback) info->callback;
1766 double streamTime = getStreamTime();
1767 RtAudioStreamStatus status = 0;
1768 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1769 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1770 handle->xrun[0] = false;
1772 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1773 status |= RTAUDIO_INPUT_OVERFLOW;
1774 handle->xrun[1] = false;
1777 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1778 stream_.bufferSize, streamTime, status, info->userData );
1779 if ( cbReturnValue == 2 ) {
1780 stream_.state = STREAM_STOPPING;
1781 handle->drainCounter = 2;
1785 else if ( cbReturnValue == 1 ) {
1786 handle->drainCounter = 1;
1787 handle->internalDrain = true;
1791 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1793 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1795 if ( handle->nStreams[0] == 1 ) {
1796 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1798 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1800 else { // fill multiple streams with zeros
1801 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1802 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1804 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1808 else if ( handle->nStreams[0] == 1 ) {
1809 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1810 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1811 stream_.userBuffer[0], stream_.convertInfo[0] );
1813 else { // copy from user buffer
1814 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1815 stream_.userBuffer[0],
1816 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1819 else { // fill multiple streams
1820 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1821 if ( stream_.doConvertBuffer[0] ) {
1822 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1823 inBuffer = (Float32 *) stream_.deviceBuffer;
1826 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1827 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1828 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1829 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1830 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1833 else { // fill multiple multi-channel streams with interleaved data
1834 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1837 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1838 UInt32 inChannels = stream_.nUserChannels[0];
1839 if ( stream_.doConvertBuffer[0] ) {
1840 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1841 inChannels = stream_.nDeviceChannels[0];
1844 if ( inInterleaved ) inOffset = 1;
1845 else inOffset = stream_.bufferSize;
1847 channelsLeft = inChannels;
1848 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1850 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1851 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1854 // Account for possible channel offset in first stream
1855 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1856 streamChannels -= stream_.channelOffset[0];
1857 outJump = stream_.channelOffset[0];
1861 // Account for possible unfilled channels at end of the last stream
1862 if ( streamChannels > channelsLeft ) {
1863 outJump = streamChannels - channelsLeft;
1864 streamChannels = channelsLeft;
1867 // Determine input buffer offsets and skips
1868 if ( inInterleaved ) {
1869 inJump = inChannels;
1870 in += inChannels - channelsLeft;
1874 in += (inChannels - channelsLeft) * inOffset;
1877 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1878 for ( unsigned int j=0; j<streamChannels; j++ ) {
1879 *out++ = in[j*inOffset];
1884 channelsLeft -= streamChannels;
1890 // Don't bother draining input
1891 if ( handle->drainCounter ) {
1892 handle->drainCounter++;
1896 AudioDeviceID inputDevice;
1897 inputDevice = handle->id[1];
1898 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1900 if ( handle->nStreams[1] == 1 ) {
1901 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1902 convertBuffer( stream_.userBuffer[1],
1903 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1904 stream_.convertInfo[1] );
1906 else { // copy to user buffer
1907 memcpy( stream_.userBuffer[1],
1908 inBufferList->mBuffers[handle->iStream[1]].mData,
1909 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1912 else { // read from multiple streams
1913 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1914 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1916 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1917 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1918 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1919 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1920 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1923 else { // read from multiple multi-channel streams
1924 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1927 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1928 UInt32 outChannels = stream_.nUserChannels[1];
1929 if ( stream_.doConvertBuffer[1] ) {
1930 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1931 outChannels = stream_.nDeviceChannels[1];
1934 if ( outInterleaved ) outOffset = 1;
1935 else outOffset = stream_.bufferSize;
1937 channelsLeft = outChannels;
1938 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1940 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1941 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1944 // Account for possible channel offset in first stream
1945 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1946 streamChannels -= stream_.channelOffset[1];
1947 inJump = stream_.channelOffset[1];
1951 // Account for possible unread channels at end of the last stream
1952 if ( streamChannels > channelsLeft ) {
1953 inJump = streamChannels - channelsLeft;
1954 streamChannels = channelsLeft;
1957 // Determine output buffer offsets and skips
1958 if ( outInterleaved ) {
1959 outJump = outChannels;
1960 out += outChannels - channelsLeft;
1964 out += (outChannels - channelsLeft) * outOffset;
1967 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1968 for ( unsigned int j=0; j<streamChannels; j++ ) {
1969 out[j*outOffset] = *in++;
1974 channelsLeft -= streamChannels;
1978 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1979 convertBuffer( stream_.userBuffer[1],
1980 stream_.deviceBuffer,
1981 stream_.convertInfo[1] );
1987 //MUTEX_UNLOCK( &stream_.mutex );
1989 RtApi::tickStreamTime();
1993 const char* RtApiCore :: getErrorCode( OSStatus code )
1997 case kAudioHardwareNotRunningError:
1998 return "kAudioHardwareNotRunningError";
2000 case kAudioHardwareUnspecifiedError:
2001 return "kAudioHardwareUnspecifiedError";
2003 case kAudioHardwareUnknownPropertyError:
2004 return "kAudioHardwareUnknownPropertyError";
2006 case kAudioHardwareBadPropertySizeError:
2007 return "kAudioHardwareBadPropertySizeError";
2009 case kAudioHardwareIllegalOperationError:
2010 return "kAudioHardwareIllegalOperationError";
2012 case kAudioHardwareBadObjectError:
2013 return "kAudioHardwareBadObjectError";
2015 case kAudioHardwareBadDeviceError:
2016 return "kAudioHardwareBadDeviceError";
2018 case kAudioHardwareBadStreamError:
2019 return "kAudioHardwareBadStreamError";
2021 case kAudioHardwareUnsupportedOperationError:
2022 return "kAudioHardwareUnsupportedOperationError";
2024 case kAudioDeviceUnsupportedFormatError:
2025 return "kAudioDeviceUnsupportedFormatError";
2027 case kAudioDevicePermissionsError:
2028 return "kAudioDevicePermissionsError";
2031 return "CoreAudio unknown error";
2035 //******************** End of __MACOSX_CORE__ *********************//
2038 #if defined(__UNIX_JACK__)
2040 // JACK is a low-latency audio server, originally written for the
2041 // GNU/Linux operating system and now also ported to OS-X. It can
2042 // connect a number of different applications to an audio device, as
2043 // well as allowing them to share audio between themselves.
2045 // When using JACK with RtAudio, "devices" refer to JACK clients that
2046 // have ports connected to the server. The JACK server is typically
2047 // started in a terminal as follows:
2049 // .jackd -d alsa -d hw:0
2051 // or through an interface program such as qjackctl. Many of the
2052 // parameters normally set for a stream are fixed by the JACK server
2053 // and can be specified when the JACK server is started. In
2056 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2058 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2059 // frames, and number of buffers = 4. Once the server is running, it
2060 // is not possible to override these values. If the values are not
2061 // specified in the command-line, the JACK server uses default values.
2063 // The JACK server does not have to be running when an instance of
2064 // RtApiJack is created, though the function getDeviceCount() will
2065 // report 0 devices found until JACK has been started. When no
2066 // devices are available (i.e., the JACK server is not running), a
2067 // stream cannot be opened.
2069 #include <jack/jack.h>
2073 // A structure to hold various information related to the Jack API
2076 jack_client_t *client;
2077 jack_port_t **ports[2];
2078 std::string deviceName[2];
2080 pthread_cond_t condition;
2081 int drainCounter; // Tracks callback counts when draining
2082 bool internalDrain; // Indicates if stop is initiated from callback or not.
2085 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed via jack_set_error_function() to
// suppress JACK's internal stderr reporting in non-debug builds.
static void jackSilentError( const char * ) {}
#endif
2092 RtApiJack :: RtApiJack()
2093 :shouldAutoconnect_(true) {
2094 // Nothing to do here.
2095 #if !defined(__RTAUDIO_DEBUG__)
2096 // Turn off Jack's internal error reporting.
2097 jack_set_error_function( &jackSilentError );
2101 RtApiJack :: ~RtApiJack()
2103 if ( stream_.state != STREAM_CLOSED ) closeStream();
2106 unsigned int RtApiJack :: getDeviceCount( void )
2108 // See if we can become a jack client.
2109 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2110 jack_status_t *status = NULL;
2111 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2112 if ( client == 0 ) return 0;
2115 std::string port, previousPort;
2116 unsigned int nChannels = 0, nDevices = 0;
2117 ports = jack_get_ports( client, NULL, NULL, 0 );
2119 // Parse the port names up to the first colon (:).
2122 port = (char *) ports[ nChannels ];
2123 iColon = port.find(":");
2124 if ( iColon != std::string::npos ) {
2125 port = port.substr( 0, iColon + 1 );
2126 if ( port != previousPort ) {
2128 previousPort = port;
2131 } while ( ports[++nChannels] );
2135 jack_client_close( client );
// Probe capability information (name, channel counts, server sample
// rate, native format, default-device flags) for JACK "device" number
// `device`, where a device is a distinct client-name prefix among the
// registered port names (text before the first ':').
//
// NOTE(review): this extraction has lines elided (braces, the do/while
// opener, early returns and the jack_free()/free() of the port arrays
// are not visible); the numeric prefix on each line is extraction
// debris, not code.
2139 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2141 RtAudio::DeviceInfo info;
// probed stays false on every early-error return below.
2142 info.probed = false;
// Connect as a throwaway client; never auto-start a server for a probe.
2144 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2145 jack_status_t *status = NULL;
2146 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2147 if ( client == 0 ) {
2148 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2149 error( RtAudioError::WARNING );
// Walk all port names to locate the device-th distinct client prefix.
2154 std::string port, previousPort;
2155 unsigned int nPorts = 0, nDevices = 0;
2156 ports = jack_get_ports( client, NULL, NULL, 0 );
2158 // Parse the port names up to the first colon (:).
2161 port = (char *) ports[ nPorts ];
2162 iColon = port.find(":");
2163 if ( iColon != std::string::npos ) {
2164 port = port.substr( 0, iColon );
2165 if ( port != previousPort ) {
2166 if ( nDevices == device ) info.name = port;
2168 previousPort = port;
2171 } while ( ports[++nPorts] );
2175 if ( device >= nDevices ) {
2176 jack_client_close( client );
2177 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2178 error( RtAudioError::INVALID_USE );
2182 // Get the current jack server sample rate.
// JACK fixes the rate server-wide, so it is the only supported rate.
2183 info.sampleRates.clear();
2185 info.preferredSampleRate = jack_get_sample_rate( client );
2186 info.sampleRates.push_back( info.preferredSampleRate );
2188 // Count the available ports containing the client name as device
2189 // channels. Jack "input ports" equal RtAudio output channels.
2190 unsigned int nChannels = 0;
2191 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2193 while ( ports[ nChannels ] ) nChannels++;
2195 info.outputChannels = nChannels;
2198 // Jack "output ports" equal RtAudio input channels.
// nChannels is presumably reset to 0 on an elided line before this
// second count — TODO confirm against the canonical source.
2200 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2202 while ( ports[ nChannels ] ) nChannels++;
2204 info.inputChannels = nChannels;
2207 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2208 jack_client_close(client);
2209 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2210 error( RtAudioError::WARNING );
2214 // If device opens for both playback and capture, we determine the channels.
2215 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2216 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2218 // Jack always uses 32-bit floats.
2219 info.nativeFormats = RTAUDIO_FLOAT32;
2221 // Jack doesn't provide default devices so we'll use the first available one.
2222 if ( device == 0 && info.outputChannels > 0 )
2223 info.isDefaultOutput = true;
2224 if ( device == 0 && info.inputChannels > 0 )
2225 info.isDefaultInput = true;
// Success path: release the probe client before returning info.
2227 jack_client_close(client);
2232 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2234 CallbackInfo *info = (CallbackInfo *) infoPointer;
2236 RtApiJack *object = (RtApiJack *) info->object;
2237 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2242 // This function will be called by a spawned thread when the Jack
2243 // server signals that it is shutting down. It is necessary to handle
2244 // it this way because the jackShutdown() function must return before
2245 // the jack_deactivate() function (in closeStream()) will return.
2246 static void *jackCloseStream( void *ptr )
2248 CallbackInfo *info = (CallbackInfo *) ptr;
2249 RtApiJack *object = (RtApiJack *) info->object;
2251 object->closeStream();
2253 pthread_exit( NULL );
2255 static void jackShutdown( void *infoPointer )
2257 CallbackInfo *info = (CallbackInfo *) infoPointer;
2258 RtApiJack *object = (RtApiJack *) info->object;
2260 // Check current stream state. If stopped, then we'll assume this
2261 // was called as a result of a call to RtApiJack::stopStream (the
2262 // deactivation of a client handle causes this function to be called).
2263 // If not, we'll assume the Jack server is shutting down or some
2264 // other problem occurred and we should close the stream.
2265 if ( object->isStreamRunning() == false ) return;
2267 ThreadHandle threadId;
2268 pthread_create( &threadId, NULL, jackCloseStream, info );
2269 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2272 static int jackXrun( void *infoPointer )
2274 JackHandle *handle = *((JackHandle **) infoPointer);
2276 if ( handle->ports[0] ) handle->xrun[0] = true;
2277 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open and configure one direction (OUTPUT or INPUT) of a JACK stream:
// connect as a JACK client, validate device/channels/sample rate, set
// up stream_ bookkeeping, allocate user/device buffers and the
// JackHandle, install the process/xrun/shutdown callbacks, and
// register one JACK port per user channel.
//
// \param device       index among client-name prefixes (see getDeviceCount).
// \param mode         OUTPUT or INPUT; called once per direction for DUPLEX.
// \param channels     requested channel count for this direction.
// \param firstChannel channel offset within the device's ports.
// \param sampleRate   must equal the JACK server rate (JACK fixes it).
// \param format       user sample format; device format is always FLOAT32.
// \param bufferSize   in/out — forced to the JACK period size.
// \param options      optional stream options (client name, flags).
// \return SUCCESS or FAILURE.
//
// NOTE(review): this extraction has lines elided (braces, the `error:`
// cleanup label, several early returns); numeric prefixes on each line
// are extraction debris, not code.
2282 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2283 unsigned int firstChannel, unsigned int sampleRate,
2284 RtAudioFormat format, unsigned int *bufferSize,
2285 RtAudio::StreamOptions *options )
2287 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2289 // Look for jack server and try to become a client (only do once per stream).
2290 jack_client_t *client = 0;
2291 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2292 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2293 jack_status_t *status = NULL;
2294 if ( options && !options->streamName.empty() )
2295 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2297 client = jack_client_open( "RtApiJack", jackoptions, status );
2298 if ( client == 0 ) {
2299 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2300 error( RtAudioError::WARNING );
2305 // The handle must have been created on an earlier pass.
2306 client = handle->client;
// Resolve the device index to its client-name prefix.
2310 std::string port, previousPort, deviceName;
2311 unsigned int nPorts = 0, nDevices = 0;
2312 ports = jack_get_ports( client, NULL, NULL, 0 );
2314 // Parse the port names up to the first colon (:).
2317 port = (char *) ports[ nPorts ];
2318 iColon = port.find(":");
2319 if ( iColon != std::string::npos ) {
2320 port = port.substr( 0, iColon );
2321 if ( port != previousPort ) {
2322 if ( nDevices == device ) deviceName = port;
2324 previousPort = port;
2327 } while ( ports[++nPorts] );
2331 if ( device >= nDevices ) {
2332 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2336 // Count the available ports containing the client name as device
2337 // channels. Jack "input ports" equal RtAudio output channels.
2338 unsigned int nChannels = 0;
2339 unsigned long flag = JackPortIsInput;
2340 if ( mode == INPUT ) flag = JackPortIsOutput;
2341 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2343 while ( ports[ nChannels ] ) nChannels++;
2347 // Compare the jack ports for specified client to the requested number of channels.
2348 if ( nChannels < (channels + firstChannel) ) {
2349 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2350 errorText_ = errorStream_.str();
2354 // Check the jack server sample rate.
// JACK fixes the rate server-wide; a mismatch is a hard failure.
2355 unsigned int jackRate = jack_get_sample_rate( client );
2356 if ( sampleRate != jackRate ) {
2357 jack_client_close( client );
2358 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2359 errorText_ = errorStream_.str();
2362 stream_.sampleRate = jackRate;
2364 // Get the latency of the JACK port.
2365 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2366 if ( ports[ firstChannel ] ) {
2368 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2369 // the range (usually the min and max are equal)
2370 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2371 // get the latency range
2372 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2373 // be optimistic, use the min!
2374 stream_.latency[mode] = latrange.min;
2375 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2379 // The jack server always uses 32-bit floating-point data.
2380 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2381 stream_.userFormat = format;
2383 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2384 else stream_.userInterleaved = true;
2386 // Jack always uses non-interleaved buffers.
2387 stream_.deviceInterleaved[mode] = false;
2389 // Jack always provides host byte-ordered data.
2390 stream_.doByteSwap[mode] = false;
2392 // Get the buffer size. The buffer size and number of buffers
2393 // (periods) is set when the jack server is started.
2394 stream_.bufferSize = (int) jack_get_buffer_size( client );
2395 *bufferSize = stream_.bufferSize;
2397 stream_.nDeviceChannels[mode] = channels;
2398 stream_.nUserChannels[mode] = channels;
2400 // Set flags for buffer conversion.
// Conversion is needed on format mismatch or when a multichannel user
// buffer is interleaved (JACK's device side is never interleaved).
2401 stream_.doConvertBuffer[mode] = false;
2402 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2403 stream_.doConvertBuffer[mode] = true;
2404 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2405 stream_.nUserChannels[mode] > 1 )
2406 stream_.doConvertBuffer[mode] = true;
2408 // Allocate our JackHandle structure for the stream.
2409 if ( handle == 0 ) {
2411 handle = new JackHandle;
2413 catch ( std::bad_alloc& ) {
2414 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2418 if ( pthread_cond_init(&handle->condition, NULL) ) {
2419 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2422 stream_.apiHandle = (void *) handle;
2423 handle->client = client;
2425 handle->deviceName[mode] = deviceName;
2427 // Allocate necessary internal buffers.
2428 unsigned long bufferBytes;
2429 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2430 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2431 if ( stream_.userBuffer[mode] == NULL ) {
2432 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2436 if ( stream_.doConvertBuffer[mode] ) {
// In DUPLEX mode the device buffer is shared between directions; only
// reallocate if this direction needs a larger one.
2438 bool makeBuffer = true;
2439 if ( mode == OUTPUT )
2440 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2441 else { // mode == INPUT
2442 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2443 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2444 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2445 if ( bufferBytes < bytesOut ) makeBuffer = false;
2450 bufferBytes *= *bufferSize;
2451 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2452 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2453 if ( stream_.deviceBuffer == NULL ) {
2454 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2460 // Allocate memory for the Jack ports (channels) identifiers.
2461 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2462 if ( handle->ports[mode] == NULL ) {
2463 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2467 stream_.device[mode] = device;
2468 stream_.channelOffset[mode] = firstChannel;
2469 stream_.state = STREAM_STOPPED;
2470 stream_.callbackInfo.object = (void *) this;
2472 if ( stream_.mode == OUTPUT && mode == INPUT )
2473 // We had already set up the stream for output.
2474 stream_.mode = DUPLEX;
2476 stream_.mode = mode;
// Callbacks are only installed on the first pass (elided condition).
2477 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2478 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2479 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2482 // Register our ports.
2484 if ( mode == OUTPUT ) {
2485 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2486 snprintf( label, 64, "outport %d", i );
2487 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2488 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2492 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2493 snprintf( label, 64, "inport %d", i );
2494 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2495 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2499 // Setup the buffer conversion information structure. We don't use
2500 // buffers to do channel offsets, so we override that parameter
2502 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2504 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// ---- error cleanup path (label elided in this extraction) ----
// Everything below undoes partial allocation before returning FAILURE.
2510 pthread_cond_destroy( &handle->condition );
2511 jack_client_close( handle->client );
2513 if ( handle->ports[0] ) free( handle->ports[0] );
2514 if ( handle->ports[1] ) free( handle->ports[1] );
2517 stream_.apiHandle = 0;
2520 for ( int i=0; i<2; i++ ) {
2521 if ( stream_.userBuffer[i] ) {
2522 free( stream_.userBuffer[i] );
2523 stream_.userBuffer[i] = 0;
2527 if ( stream_.deviceBuffer ) {
2528 free( stream_.deviceBuffer );
2529 stream_.deviceBuffer = 0;
2535 void RtApiJack :: closeStream( void )
2537 if ( stream_.state == STREAM_CLOSED ) {
2538 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2539 error( RtAudioError::WARNING );
2543 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2546 if ( stream_.state == STREAM_RUNNING )
2547 jack_deactivate( handle->client );
2549 jack_client_close( handle->client );
2553 if ( handle->ports[0] ) free( handle->ports[0] );
2554 if ( handle->ports[1] ) free( handle->ports[1] );
2555 pthread_cond_destroy( &handle->condition );
2557 stream_.apiHandle = 0;
2560 for ( int i=0; i<2; i++ ) {
2561 if ( stream_.userBuffer[i] ) {
2562 free( stream_.userBuffer[i] );
2563 stream_.userBuffer[i] = 0;
2567 if ( stream_.deviceBuffer ) {
2568 free( stream_.deviceBuffer );
2569 stream_.deviceBuffer = 0;
2572 stream_.mode = UNINITIALIZED;
2573 stream_.state = STREAM_CLOSED;
// Activate the JACK client and (unless auto-connect was disabled via
// RTAUDIO_JACK_DONT_CONNECT) wire our registered ports to the device's
// ports, honoring the per-direction channel offset.  On any failure the
// elided error path falls through to error(SYSTEM_ERROR) at the end.
//
// NOTE(review): this extraction has lines elided (braces, goto-based
// error jumps, the unlock label); numeric prefixes are extraction
// debris, not code.
2576 void RtApiJack :: startStream( void )
2579 if ( stream_.state == STREAM_RUNNING ) {
2580 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2581 error( RtAudioError::WARNING );
2585 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2586 int result = jack_activate( handle->client );
2588 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2594 // Get the list of available ports.
2595 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
// For playback we connect our output ports to the device's input ports.
2597 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2598 if ( ports == NULL) {
2599 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2603 // Now make the port connections. Since RtAudio wasn't designed to
2604 // allow the user to select particular channels of a device, we'll
2605 // just open the first "nChannels" ports with offset.
2606 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2608 if ( ports[ stream_.channelOffset[0] + i ] )
2609 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2612 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2619 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
// For capture, direction is reversed: device output ports feed ours.
2621 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2622 if ( ports == NULL) {
2623 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2627 // Now make the port connections. See note above.
2628 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2630 if ( ports[ stream_.channelOffset[1] + i ] )
2631 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2634 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain state so the first callback starts with a clean slate.
2641 handle->drainCounter = 0;
2642 handle->internalDrain = false;
2643 stream_.state = STREAM_RUNNING;
// Elided `unlock:` label precedes these two lines: result == 0 means
// everything above succeeded.
2646 if ( result == 0 ) return;
2647 error( RtAudioError::SYSTEM_ERROR );
2650 void RtApiJack :: stopStream( void )
2653 if ( stream_.state == STREAM_STOPPED ) {
2654 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2655 error( RtAudioError::WARNING );
2659 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2660 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2662 if ( handle->drainCounter == 0 ) {
2663 handle->drainCounter = 2;
2664 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2668 jack_deactivate( handle->client );
2669 stream_.state = STREAM_STOPPED;
2672 void RtApiJack :: abortStream( void )
2675 if ( stream_.state == STREAM_STOPPED ) {
2676 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2677 error( RtAudioError::WARNING );
2681 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2682 handle->drainCounter = 2;
2687 // This function will be called by a spawned thread when the user
2688 // callback function signals that the stream should be stopped or
2689 // aborted. It is necessary to handle it this way because the
2690 // callbackEvent() function must return before the jack_deactivate()
2691 // function will return.
2692 static void *jackStopStream( void *ptr )
2694 CallbackInfo *info = (CallbackInfo *) ptr;
2695 RtApiJack *object = (RtApiJack *) info->object;
2697 object->stopStream();
2698 pthread_exit( NULL );
// Per-period processing driven by jackCallbackHandler: invoke the user
// callback, move audio between the user/device buffers and the JACK
// port buffers (with format conversion when enabled), handle drain/
// abort requests, and advance the stream time.
//
// NOTE(review): this extraction has lines elided (braces, several
// returns); numeric prefixes are extraction debris, not code.  The
// "RtApiCore::" prefixes in the two warning strings below are a known
// copy/paste slip in upstream RtAudio — left untouched here since a
// doc pass must not alter runtime strings.
2701 bool RtApiJack :: callbackEvent( unsigned long nframes )
2703 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2704 if ( stream_.state == STREAM_CLOSED ) {
2705 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2706 error( RtAudioError::WARNING );
2709 if ( stream_.bufferSize != nframes ) {
2710 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2711 error( RtAudioError::WARNING );
2715 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2716 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2718 // Check if we were draining the stream and signal is finished.
2719 if ( handle->drainCounter > 3 ) {
2720 ThreadHandle threadId;
2722 stream_.state = STREAM_STOPPING;
2723 if ( handle->internalDrain == true )
// The callback itself requested the stop, so stopStream() must run on
// another thread (it waits for this callback to return).
2724 pthread_create( &threadId, NULL, jackStopStream, info );
2726 pthread_cond_signal( &handle->condition );
2730 // Invoke user callback first, to get fresh output data.
2731 if ( handle->drainCounter == 0 ) {
2732 RtAudioCallback callback = (RtAudioCallback) info->callback;
2733 double streamTime = getStreamTime();
2734 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags latched by jackXrun().
2735 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2736 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2737 handle->xrun[0] = false;
2739 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2740 status |= RTAUDIO_INPUT_OVERFLOW;
2741 handle->xrun[1] = false;
2743 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2744 stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort now; 1 = stop after draining output.
2745 if ( cbReturnValue == 2 ) {
2746 stream_.state = STREAM_STOPPING;
2747 handle->drainCounter = 2;
2749 pthread_create( &id, NULL, jackStopStream, info );
2752 else if ( cbReturnValue == 1 ) {
2753 handle->drainCounter = 1;
2754 handle->internalDrain = true;
2758 jack_default_audio_sample_t *jackbuffer;
2759 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2760 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2762 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2764 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2765 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2766 memset( jackbuffer, 0, bufferBytes );
2770 else if ( stream_.doConvertBuffer[0] ) {
// Convert user data into the non-interleaved device buffer, then copy
// each channel slab into its JACK port buffer.
2772 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2774 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2775 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2776 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2779 else { // no buffer conversion
2780 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2781 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2782 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2787 // Don't bother draining input
2788 if ( handle->drainCounter ) {
2789 handle->drainCounter++;
2793 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2795 if ( stream_.doConvertBuffer[1] ) {
2796 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2797 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2798 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2800 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2802 else { // no buffer conversion
2803 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2804 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2805 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2811 RtApi::tickStreamTime();
2814 //******************** End of __UNIX_JACK__ *********************//
2817 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2819 // The ASIO API is designed around a callback scheme, so this
2820 // implementation is similar to that used for OS-X CoreAudio and Linux
2821 // Jack. The primary constraint with ASIO is that it only allows
2822 // access to a single driver at a time. Thus, it is not possible to
2823 // have more than one simultaneous RtAudio stream.
2825 // This implementation also requires a number of external ASIO files
2826 // and a few global variables. The ASIO callback scheme does not
2827 // allow for the passing of user data, so we must create a global
2828 // pointer to our callbackInfo structure.
2830 // On unix systems, we make use of a pthread condition variable.
2831 // Since there is no equivalent in Windows, I hacked something based
2832 // on information found in
2833 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2835 #include "asiosys.h"
2837 #include "iasiothiscallresolver.h"
2838 #include "asiodrivers.h"
2841 static AsioDrivers drivers;
2842 static ASIOCallbacks asioCallbacks;
2843 static ASIODriverInfo driverInfo;
2844 static CallbackInfo *asioCallbackInfo;
2845 static bool asioXRun;
2848 int drainCounter; // Tracks callback counts when draining
2849 bool internalDrain; // Indicates if stop is initiated from callback or not.
2850 ASIOBufferInfo *bufferInfos;
2854 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2857 // Function declarations (definitions at end of section)
2858 static const char* getAsioErrorString( ASIOError result );
2859 static void sampleRateChanged( ASIOSampleRate sRate );
2860 static long asioMessages( long selector, long value, void* message, double* opt );
2862 RtApiAsio :: RtApiAsio()
2864 // ASIO cannot run on a multi-threaded appartment. You can call
2865 // CoInitialize beforehand, but it must be for appartment threading
2866 // (in which case, CoInitilialize will return S_FALSE here).
2867 coInitialized_ = false;
2868 HRESULT hr = CoInitialize( NULL );
2870 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2871 error( RtAudioError::WARNING );
2873 coInitialized_ = true;
2875 drivers.removeCurrentDriver();
2876 driverInfo.asioVersion = 2;
2878 // See note in DirectSound implementation about GetDesktopWindow().
2879 driverInfo.sysRef = GetForegroundWindow();
2882 RtApiAsio :: ~RtApiAsio()
2884 if ( stream_.state != STREAM_CLOSED ) closeStream();
2885 if ( coInitialized_ ) CoUninitialize();
2888 unsigned int RtApiAsio :: getDeviceCount( void )
2890 return (unsigned int) drivers.asioGetNumDev();
// Probe capabilities (channel counts, supported sample rates, native
// data format) of the ASIO driver at index 'device'.  ASIO permits
// only one loaded driver at a time, so while a stream is open the
// cached results in devices_ (filled by saveDeviceInfo()) are returned
// instead of re-probing.
2893 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2895 RtAudio::DeviceInfo info;
2896 info.probed = false;
// Validate the device index before touching any driver.
2899 unsigned int nDevices = getDeviceCount();
2900 if ( nDevices == 0 ) {
2901 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2902 error( RtAudioError::INVALID_USE );
2906 if ( device >= nDevices ) {
2907 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2908 error( RtAudioError::INVALID_USE );
2912 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2913 if ( stream_.state != STREAM_CLOSED ) {
2914 if ( device >= devices_.size() ) {
2915 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2916 error( RtAudioError::WARNING );
2919 return devices_[ device ];
// Resolve the driver name for this index, then load and init it.
2922 char driverName[32];
2923 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2924 if ( result != ASE_OK ) {
2925 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2926 errorText_ = errorStream_.str();
2927 error( RtAudioError::WARNING );
2931 info.name = driverName;
2933 if ( !drivers.loadDriver( driverName ) ) {
2934 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2935 errorText_ = errorStream_.str();
2936 error( RtAudioError::WARNING );
2940 result = ASIOInit( &driverInfo );
2941 if ( result != ASE_OK ) {
2942 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2943 errorText_ = errorStream_.str();
2944 error( RtAudioError::WARNING );
2948 // Determine the device channel information.
2949 long inputChannels, outputChannels;
2950 result = ASIOGetChannels( &inputChannels, &outputChannels );
2951 if ( result != ASE_OK ) {
2952 drivers.removeCurrentDriver();
2953 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2954 errorText_ = errorStream_.str();
2955 error( RtAudioError::WARNING );
2959 info.outputChannels = outputChannels;
2960 info.inputChannels = inputChannels;
// Duplex capability is limited by the smaller of the two counts.
2961 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2962 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2964 // Determine the supported sample rates.
2965 info.sampleRates.clear();
2966 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2967 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2968 if ( result == ASE_OK ) {
2969 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2971 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2972 info.preferredSampleRate = SAMPLE_RATES[i];
2976 // Determine supported data types ... just check first channel and assume rest are the same.
2977 ASIOChannelInfo channelInfo;
2978 channelInfo.channel = 0;
2979 channelInfo.isInput = true;
2980 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2981 result = ASIOGetChannelInfo( &channelInfo );
2982 if ( result != ASE_OK ) {
2983 drivers.removeCurrentDriver();
2984 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2985 errorText_ = errorStream_.str();
2986 error( RtAudioError::WARNING );
// Map the ASIO sample type onto RtAudio's format flags.
2990 info.nativeFormats = 0;
2991 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2992 info.nativeFormats |= RTAUDIO_SINT16;
2993 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2994 info.nativeFormats |= RTAUDIO_SINT32;
2995 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2996 info.nativeFormats |= RTAUDIO_FLOAT32;
2997 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2998 info.nativeFormats |= RTAUDIO_FLOAT64;
2999 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
3000 info.nativeFormats |= RTAUDIO_SINT24;
3002 if ( info.outputChannels > 0 )
3003 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
3004 if ( info.inputChannels > 0 )
3005 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver again: the probe must leave no driver loaded.
3008 drivers.removeCurrentDriver();
3012 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
3014 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
3015 object->callbackEvent( index );
3018 void RtApiAsio :: saveDeviceInfo( void )
3022 unsigned int nDevices = getDeviceCount();
3023 devices_.resize( nDevices );
3024 for ( unsigned int i=0; i<nDevices; i++ )
3025 devices_[i] = getDeviceInfo( i );
// Open (or extend to duplex) a stream on ASIO driver 'device'.
// Verifies channels/sample rate/format against the driver, negotiates
// a buffer size, creates the ASIO buffers, and allocates RtAudio's
// user/device conversion buffers.  On failure, cleanup for the
// single-direction case is done here; duplex-input failure cleanup is
// delegated to RtApi::openStream().
3028 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
3029 unsigned int firstChannel, unsigned int sampleRate,
3030 RtAudioFormat format, unsigned int *bufferSize,
3031 RtAudio::StreamOptions *options )
3032 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// A second call with mode==INPUT while output is already configured
// means we are completing a duplex stream on the same driver.
3034 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
3036 // For ASIO, a duplex stream MUST use the same driver.
3037 if ( isDuplexInput && stream_.device[0] != device ) {
3038 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
3042 char driverName[32];
3043 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
3044 if ( result != ASE_OK ) {
3045 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
3046 errorText_ = errorStream_.str();
3050 // Only load the driver once for duplex stream.
3051 if ( !isDuplexInput ) {
3052 // The getDeviceInfo() function will not work when a stream is open
3053 // because ASIO does not allow multiple devices to run at the same
3054 // time. Thus, we'll probe the system before opening a stream and
3055 // save the results for use by getDeviceInfo().
3056 this->saveDeviceInfo();
3058 if ( !drivers.loadDriver( driverName ) ) {
3059 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3060 errorText_ = errorStream_.str();
3064 result = ASIOInit( &driverInfo );
3065 if ( result != ASE_OK ) {
3066 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3067 errorText_ = errorStream_.str();
3072 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3073 bool buffersAllocated = false;
3074 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3075 unsigned int nChannels;
3078 // Check the device channel count.
3079 long inputChannels, outputChannels;
3080 result = ASIOGetChannels( &inputChannels, &outputChannels );
3081 if ( result != ASE_OK ) {
3082 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3083 errorText_ = errorStream_.str();
3087 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3088 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3089 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3090 errorText_ = errorStream_.str();
3093 stream_.nDeviceChannels[mode] = channels;
3094 stream_.nUserChannels[mode] = channels;
3095 stream_.channelOffset[mode] = firstChannel;
3097 // Verify the sample rate is supported.
3098 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3099 if ( result != ASE_OK ) {
3100 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3101 errorText_ = errorStream_.str();
3105 // Get the current sample rate
3106 ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is mojibake — the original argument is
// "&currentRate" (an "&curren" sequence was collapsed to the '¤'
// character by an HTML-entity/encoding pass).  Restore "&currentRate"
// when fixing the file's encoding.
3107 result = ASIOGetSampleRate( ¤tRate );
3108 if ( result != ASE_OK ) {
3109 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3110 errorText_ = errorStream_.str();
3114 // Set the sample rate only if necessary
3115 if ( currentRate != sampleRate ) {
3116 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3117 if ( result != ASE_OK ) {
3118 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3119 errorText_ = errorStream_.str();
3124 // Determine the driver data type.
3125 ASIOChannelInfo channelInfo;
3126 channelInfo.channel = 0;
3127 if ( mode == OUTPUT ) channelInfo.isInput = false;
3128 else channelInfo.isInput = true;
3129 result = ASIOGetChannelInfo( &channelInfo );
3130 if ( result != ASE_OK ) {
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3132 errorText_ = errorStream_.str();
3136 // Assuming WINDOWS host is always little-endian.
3137 stream_.doByteSwap[mode] = false;
3138 stream_.userFormat = format;
3139 stream_.deviceFormat[mode] = 0;
// Map the driver's sample type to an RtAudio format; MSB variants
// additionally require byte swapping on this little-endian host.
3140 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3141 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3142 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3144 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3145 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3146 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3148 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3149 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3150 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3152 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3153 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3154 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3156 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3157 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3158 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3161 if ( stream_.deviceFormat[mode] == 0 ) {
3162 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3163 errorText_ = errorStream_.str();
3167 // Set the buffer size. For a duplex stream, this will end up
3168 // setting the buffer size based on the input constraints, which
3170 long minSize, maxSize, preferSize, granularity;
3171 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3172 if ( result != ASE_OK ) {
3173 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3174 errorText_ = errorStream_.str();
3178 if ( isDuplexInput ) {
3179 // When this is the duplex input (output was opened before), then we have to use the same
3180 // buffersize as the output, because it might use the preferred buffer size, which most
3181 // likely wasn't passed as input to this. The buffer sizes have to be identical anyway,
3182 // So instead of throwing an error, make them equal. The caller uses the reference
3183 // to the "bufferSize" param as usual to set up processing buffers.
3185 *bufferSize = stream_.bufferSize;
// Clamp the requested size into [minSize, maxSize], honoring the
// driver's granularity (-1 means power-of-two sizes only).
3188 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3189 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3190 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3191 else if ( granularity == -1 ) {
3192 // Make sure bufferSize is a power of two.
3193 int log2_of_min_size = 0;
3194 int log2_of_max_size = 0;
3196 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3197 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3198 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Choose the power of two in range closest to the requested size.
3201 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3202 int min_delta_num = log2_of_min_size;
3204 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3205 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3206 if (current_delta < min_delta) {
3207 min_delta = current_delta;
3212 *bufferSize = ( (unsigned int)1 << min_delta_num );
3213 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3214 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3216 else if ( granularity != 0 ) {
3217 // Set to an even multiple of granularity, rounding up.
3218 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3223 // we don't use it anymore, see above!
3224 // Just left it here for the case...
3225 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3226 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3231 stream_.bufferSize = *bufferSize;
3232 stream_.nBuffers = 2;
3234 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3235 else stream_.userInterleaved = true;
3237 // ASIO always uses non-interleaved buffers.
3238 stream_.deviceInterleaved[mode] = false;
3240 // Allocate, if necessary, our AsioHandle structure for the stream.
3241 if ( handle == 0 ) {
3243 handle = new AsioHandle;
3245 catch ( std::bad_alloc& ) {
3246 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3249 handle->bufferInfos = 0;
3251 // Create a manual-reset event.
3252 handle->condition = CreateEvent( NULL, // no security
3253 TRUE, // manual-reset
3254 FALSE, // non-signaled initially
3256 stream_.apiHandle = (void *) handle;
3259 // Create the ASIO internal buffers. Since RtAudio sets up input
3260 // and output separately, we'll have to dispose of previously
3261 // created output buffers for a duplex stream.
3262 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3263 ASIODisposeBuffers();
3264 if ( handle->bufferInfos ) free( handle->bufferInfos );
3267 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3269 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3270 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3271 if ( handle->bufferInfos == NULL ) {
3272 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3273 errorText_ = errorStream_.str();
// Output channels are listed first, then input channels.
3277 ASIOBufferInfo *infos;
3278 infos = handle->bufferInfos;
3279 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3280 infos->isInput = ASIOFalse;
3281 infos->channelNum = i + stream_.channelOffset[0];
3282 infos->buffers[0] = infos->buffers[1] = 0;
3284 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3285 infos->isInput = ASIOTrue;
3286 infos->channelNum = i + stream_.channelOffset[1];
3287 infos->buffers[0] = infos->buffers[1] = 0;
3290 // prepare for callbacks
3291 stream_.sampleRate = sampleRate;
3292 stream_.device[mode] = device;
3293 stream_.mode = isDuplexInput ? DUPLEX : mode;
3295 // store this class instance before registering callbacks, that are going to use it
3296 asioCallbackInfo = &stream_.callbackInfo;
3297 stream_.callbackInfo.object = (void *) this;
3299 // Set up the ASIO callback structure and create the ASIO data buffers.
3300 asioCallbacks.bufferSwitch = &bufferSwitch;
3301 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3302 asioCallbacks.asioMessage = &asioMessages;
3303 asioCallbacks.bufferSwitchTimeInfo = NULL;
3304 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3305 if ( result != ASE_OK ) {
3306 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3307 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3308 // in that case, let's be naïve and try that instead
3309 *bufferSize = preferSize;
3310 stream_.bufferSize = *bufferSize;
3311 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3314 if ( result != ASE_OK ) {
3315 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3316 errorText_ = errorStream_.str();
3319 buffersAllocated = true;
3320 stream_.state = STREAM_STOPPED;
3322 // Set flags for buffer conversion.
3323 stream_.doConvertBuffer[mode] = false;
3324 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3325 stream_.doConvertBuffer[mode] = true;
3326 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3327 stream_.nUserChannels[mode] > 1 )
3328 stream_.doConvertBuffer[mode] = true;
3330 // Allocate necessary internal buffers
3331 unsigned long bufferBytes;
3332 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3333 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3334 if ( stream_.userBuffer[mode] == NULL ) {
3335 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3339 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing (larger) device buffer for duplex input when
// the output side already allocated enough space.
3341 bool makeBuffer = true;
3342 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3343 if ( isDuplexInput && stream_.deviceBuffer ) {
3344 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3345 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3349 bufferBytes *= *bufferSize;
3350 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3351 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3352 if ( stream_.deviceBuffer == NULL ) {
3353 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3359 // Determine device latencies
3360 long inputLatency, outputLatency;
3361 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3362 if ( result != ASE_OK ) {
3363 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3364 errorText_ = errorStream_.str();
3365 error( RtAudioError::WARNING); // warn but don't fail
3368 stream_.latency[0] = outputLatency;
3369 stream_.latency[1] = inputLatency;
3372 // Setup the buffer conversion information structure. We don't use
3373 // buffers to do channel offsets, so we override that parameter
3375 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-cleanup path (reached via goto from the checks above).
3380 if ( !isDuplexInput ) {
3381 // the cleanup for error in the duplex input, is done by RtApi::openStream
3382 // So we clean up for single channel only
3384 if ( buffersAllocated )
3385 ASIODisposeBuffers();
3387 drivers.removeCurrentDriver();
3390 CloseHandle( handle->condition );
3391 if ( handle->bufferInfos )
3392 free( handle->bufferInfos );
3395 stream_.apiHandle = 0;
3399 if ( stream_.userBuffer[mode] ) {
3400 free( stream_.userBuffer[mode] );
3401 stream_.userBuffer[mode] = 0;
3404 if ( stream_.deviceBuffer ) {
3405 free( stream_.deviceBuffer );
3406 stream_.deviceBuffer = 0;
3411 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Stop (if running) and tear down the open stream: dispose ASIO
// buffers, unload the driver, and free the AsioHandle plus all
// user/device buffers.
3413 void RtApiAsio :: closeStream()
3415 if ( stream_.state == STREAM_CLOSED ) {
3416 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3417 error( RtAudioError::WARNING );
3421 if ( stream_.state == STREAM_RUNNING ) {
3422 stream_.state = STREAM_STOPPED;
3425 ASIODisposeBuffers();
3426 drivers.removeCurrentDriver();
3428 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3430 CloseHandle( handle->condition );
3431 if ( handle->bufferInfos )
3432 free( handle->bufferInfos );
3434 stream_.apiHandle = 0;
// Release per-direction user buffers and the shared device buffer.
3437 for ( int i=0; i<2; i++ ) {
3438 if ( stream_.userBuffer[i] ) {
3439 free( stream_.userBuffer[i] );
3440 stream_.userBuffer[i] = 0;
3444 if ( stream_.deviceBuffer ) {
3445 free( stream_.deviceBuffer );
3446 stream_.deviceBuffer = 0;
3449 stream_.mode = UNINITIALIZED;
3450 stream_.state = STREAM_CLOSED;
// File-scope flag used to coordinate the asioStopStream() helper
// thread with startStream().
3453 bool stopThreadCalled = false;
// Start the ASIO driver and reset the handle's drain bookkeeping.
3455 void RtApiAsio :: startStream()
3458 if ( stream_.state == STREAM_RUNNING ) {
3459 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3460 error( RtAudioError::WARNING );
3464 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3465 ASIOError result = ASIOStart();
3466 if ( result != ASE_OK ) {
3467 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3468 errorText_ = errorStream_.str();
3472 handle->drainCounter = 0;
3473 handle->internalDrain = false;
3474 ResetEvent( handle->condition );
3475 stream_.state = STREAM_RUNNING;
3479 stopThreadCalled = false;
3481 if ( result == ASE_OK ) return;
3482 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, first draining pending output: drainCounter = 2
// asks callbackEvent() to emit silence, and the condition event is
// signaled once draining completes.
3485 void RtApiAsio :: stopStream()
3488 if ( stream_.state == STREAM_STOPPED ) {
3489 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3490 error( RtAudioError::WARNING );
3494 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3495 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3496 if ( handle->drainCounter == 0 ) {
3497 handle->drainCounter = 2;
3498 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3502 stream_.state = STREAM_STOPPED;
3504 ASIOError result = ASIOStop();
3505 if ( result != ASE_OK ) {
3506 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3507 errorText_ = errorStream_.str();
3510 if ( result == ASE_OK ) return;
3511 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream.  For ASIO this is deliberately identical to
// stopStream() — see the comment below about residual sound.
3514 void RtApiAsio :: abortStream()
3517 if ( stream_.state == STREAM_STOPPED ) {
3518 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3519 error( RtAudioError::WARNING );
3523 // The following lines were commented-out because some behavior was
3524 // noted where the device buffers need to be zeroed to avoid
3525 // continuing sound, even when the device buffers are completely
3526 // disposed. So now, calling abort is the same as calling stop.
3527 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3528 // handle->drainCounter = 2;
3532 // This function will be called by a spawned thread when the user
3533 // callback function signals that the stream should be stopped or
3534 // aborted. It is necessary to handle it this way because the
3535 // callbackEvent() function must return before the ASIOStop()
3536 // function will return.
3537 static unsigned __stdcall asioStopStream( void *ptr )
// Thread entry point: recover the RtApiAsio instance from the
// CallbackInfo passed at thread creation and stop its stream.
3539 CallbackInfo *info = (CallbackInfo *) ptr;
3540 RtApiAsio *object = (RtApiAsio *) info->object;
3542 object->stopStream();
// Per-buffer engine invoked by bufferSwitch(): runs the user callback,
// converts/byte-swaps between user and ASIO buffers for both
// directions, and manages output draining when a stop was requested.
3547 bool RtApiAsio :: callbackEvent( long bufferIndex )
3549 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3550 if ( stream_.state == STREAM_CLOSED ) {
3551 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3552 error( RtAudioError::WARNING );
3556 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3557 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3559 // Check if we were draining the stream and signal if finished.
3560 if ( handle->drainCounter > 3 ) {
3562 stream_.state = STREAM_STOPPING;
3563 if ( handle->internalDrain == false )
3564 SetEvent( handle->condition );
3565 else { // spawn a thread to stop the stream
// ASIOStop() cannot be called from inside this driver callback, so
// stopping is delegated to the asioStopStream() worker thread.
3567 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3568 &stream_.callbackInfo, 0, &threadId );
3573 // Invoke user callback to get fresh output data UNLESS we are
3575 if ( handle->drainCounter == 0 ) {
3576 RtAudioCallback callback = (RtAudioCallback) info->callback;
3577 double streamTime = getStreamTime();
3578 RtAudioStreamStatus status = 0;
3579 if ( stream_.mode != INPUT && asioXRun == true ) {
3580 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3583 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3584 status |= RTAUDIO_INPUT_OVERFLOW;
3587 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3588 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain then stop.
3589 if ( cbReturnValue == 2 ) {
3590 stream_.state = STREAM_STOPPING;
3591 handle->drainCounter = 2;
3593 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3594 &stream_.callbackInfo, 0, &threadId );
3597 else if ( cbReturnValue == 1 ) {
3598 handle->drainCounter = 1;
3599 handle->internalDrain = true;
// Output side: write zeros when draining, otherwise convert/copy the
// user buffer into the ASIO per-channel output buffers.
3603 unsigned int nChannels, bufferBytes, i, j;
3604 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3605 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3607 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3609 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3611 for ( i=0, j=0; i<nChannels; i++ ) {
3612 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3613 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3617 else if ( stream_.doConvertBuffer[0] ) {
3619 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3620 if ( stream_.doByteSwap[0] )
3621 byteSwapBuffer( stream_.deviceBuffer,
3622 stream_.bufferSize * stream_.nDeviceChannels[0],
3623 stream_.deviceFormat[0] );
3625 for ( i=0, j=0; i<nChannels; i++ ) {
3626 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3627 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3628 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3634 if ( stream_.doByteSwap[0] )
3635 byteSwapBuffer( stream_.userBuffer[0],
3636 stream_.bufferSize * stream_.nUserChannels[0],
3637 stream_.userFormat );
3639 for ( i=0, j=0; i<nChannels; i++ ) {
3640 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3641 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3642 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3648 // Don't bother draining input
3649 if ( handle->drainCounter ) {
3650 handle->drainCounter++;
// Input side: gather the ASIO per-channel input buffers into the
// (interleaved or converted) user buffer.
3654 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3656 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3658 if (stream_.doConvertBuffer[1]) {
3660 // Always interleave ASIO input data.
3661 for ( i=0, j=0; i<nChannels; i++ ) {
3662 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3663 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3664 handle->bufferInfos[i].buffers[bufferIndex],
3668 if ( stream_.doByteSwap[1] )
3669 byteSwapBuffer( stream_.deviceBuffer,
3670 stream_.bufferSize * stream_.nDeviceChannels[1],
3671 stream_.deviceFormat[1] );
3672 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3676 for ( i=0, j=0; i<nChannels; i++ ) {
3677 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3678 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3679 handle->bufferInfos[i].buffers[bufferIndex],
3684 if ( stream_.doByteSwap[1] )
3685 byteSwapBuffer( stream_.userBuffer[1],
3686 stream_.bufferSize * stream_.nUserChannels[1],
3687 stream_.userFormat );
3692 // The following call was suggested by Malte Clasen. While the API
3693 // documentation indicates it should not be required, some device
3694 // drivers apparently do not function correctly without it.
3697 RtApi::tickStreamTime();
// ASIO driver callback: the driver reports a sample-rate change
// (typically external sync).  RtAudio responds by stopping the stream
// and informing the user via stderr.
3701 static void sampleRateChanged( ASIOSampleRate sRate )
3703 // The ASIO documentation says that this usually only happens during
3704 // external sync. Audio processing is not stopped by the driver,
3705 // actual sample rate might not have even changed, maybe only the
3706 // sample rate status of an AES/EBU or S/PDIF digital input at the
3709 RtApi *object = (RtApi *) asioCallbackInfo->object;
3711 object->stopStream();
3713 catch ( RtAudioError &exception ) {
3714 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3718 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver-to-host message callback.  Returns 1 for messages the
// host supports/handles and 0 otherwise, per the ASIO SDK contract.
3721 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3725 switch( selector ) {
3726 case kAsioSelectorSupported:
3727 if ( value == kAsioResetRequest
3728 || value == kAsioEngineVersion
3729 || value == kAsioResyncRequest
3730 || value == kAsioLatenciesChanged
3731 // The following three were added for ASIO 2.0, you don't
3732 // necessarily have to support them.
3733 || value == kAsioSupportsTimeInfo
3734 || value == kAsioSupportsTimeCode
3735 || value == kAsioSupportsInputMonitor)
3738 case kAsioResetRequest:
3739 // Defer the task and perform the reset of the driver during the
3740 // next "safe" situation. You cannot reset the driver right now,
3741 // as this code is called from the driver. Resetting the driver is
3742 // done by completely destructing it, i.e. ASIOStop(),
3743 // ASIODisposeBuffers(), destruction. Afterwards you re-initialize the
3745 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3748 case kAsioResyncRequest:
3749 // This informs the application that the driver encountered some
3750 // non-fatal data loss. It is used for synchronization purposes
3751 // of different media. Added mainly to work around the Win16Mutex
3752 // problems in Windows 95/98 with the Windows Multimedia system,
3753 // which could lose data because the Mutex was held too long by
3754 // another thread. However a driver can issue it in other
3756 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3760 case kAsioLatenciesChanged:
3761 // This will inform the host application that the driver's
3762 // latencies changed. Beware, this does not mean that the
3763 // buffer sizes have changed! You might need to update internal
3765 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3768 case kAsioEngineVersion:
3769 // Return the supported ASIO version of the host application. If
3770 // a host application does not implement this selector, ASIO 1.0
3771 // is assumed by the driver.
3774 case kAsioSupportsTimeInfo:
3775 // Informs the driver whether the
3776 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3777 // For compatibility with ASIO 1.0 drivers the host application
3778 // should always support the "old" bufferSwitch method, too.
3781 case kAsioSupportsTimeCode:
3782 // Informs the driver whether application is interested in time
3783 // code info. If an application does not need to know about time
3784 // code, the driver has less work to do.
// Translate an ASIOError code into a human-readable message via a
// small static lookup table (the Messages struct declaration is in
// the elided lines of this listing).
3791 static const char* getAsioErrorString( ASIOError result )
3799 static const Messages m[] =
3801 { ASE_NotPresent, "Hardware input or output is not present or available." },
3802 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3803 { ASE_InvalidParameter, "Invalid input parameter." },
3804 { ASE_InvalidMode, "Invalid mode." },
3805 { ASE_SPNotAdvancing, "Sample position not advancing." },
3806 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3807 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan is fine: the table has only seven entries.
3810 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3811 if ( m[i].value == result ) return m[i].message;
3813 return "Unknown error.";
3816 //******************** End of __WINDOWS_ASIO__ *********************//
3820 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3822 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3823 // - Introduces support for the Windows WASAPI API
3824 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3825 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3826 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3831 #include <audioclient.h>
3833 #include <mmdeviceapi.h>
3834 #include <functiondiscoverykeys_devpkey.h>
3837 //=============================================================================
// Release a COM interface pointer if non-null (WASAPI section helper).
// No comments may follow the backslash continuations below.
3839 #define SAFE_RELEASE( objectPtr )\
3842 objectPtr->Release();\
3846 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3848 //-----------------------------------------------------------------------------
// WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
// Therefore we must perform all necessary conversions to user buffers in order to satisfy these
// requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
// provide intermediate storage for read / write synchronization.
3867 // sets the length of the internal ring buffer
3868 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3871 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3873 bufferSize_ = bufferSize;
3878 // attempt to push a buffer into the ring buffer at the current "in" index
3879 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3881 if ( !buffer || // incoming buffer is NULL
3882 bufferSize == 0 || // incoming buffer has no data
3883 bufferSize > bufferSize_ ) // incoming buffer too large
3888 unsigned int relOutIndex = outIndex_;
3889 unsigned int inIndexEnd = inIndex_ + bufferSize;
3890 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3891 relOutIndex += bufferSize_;
3894 // "in" index can end on the "out" index but cannot begin at it
3895 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3896 return false; // not enough space between "in" index and "out" index
3899 // copy buffer from external to internal
3900 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3901 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3902 int fromInSize = bufferSize - fromZeroSize;
3907 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3908 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3910 case RTAUDIO_SINT16:
3911 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3912 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3914 case RTAUDIO_SINT24:
3915 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3916 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3918 case RTAUDIO_SINT32:
3919 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3920 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3922 case RTAUDIO_FLOAT32:
3923 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3924 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3926 case RTAUDIO_FLOAT64:
3927 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3928 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3932 // update "in" index
3933 inIndex_ += bufferSize;
3934 inIndex_ %= bufferSize_;
3939 // attempt to pull a buffer from the ring buffer from the current "out" index
3940 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3942 if ( !buffer || // incoming buffer is NULL
3943 bufferSize == 0 || // incoming buffer has no data
3944 bufferSize > bufferSize_ ) // incoming buffer too large
3949 unsigned int relInIndex = inIndex_;
3950 unsigned int outIndexEnd = outIndex_ + bufferSize;
3951 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3952 relInIndex += bufferSize_;
3955 // "out" index can begin at and end on the "in" index
3956 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3957 return false; // not enough space between "out" index and "in" index
3960 // copy buffer from internal to external
3961 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3962 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3963 int fromOutSize = bufferSize - fromZeroSize;
3968 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3969 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3971 case RTAUDIO_SINT16:
3972 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3973 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3975 case RTAUDIO_SINT24:
3976 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3977 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3979 case RTAUDIO_SINT32:
3980 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3981 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3983 case RTAUDIO_FLOAT32:
3984 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3985 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3987 case RTAUDIO_FLOAT64:
3988 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3989 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3993 // update "out" index
3994 outIndex_ += bufferSize;
3995 outIndex_ %= bufferSize_;
4002 unsigned int bufferSize_;
4003 unsigned int inIndex_;
4004 unsigned int outIndex_;
4007 //-----------------------------------------------------------------------------
4009 // A structure to hold various information related to the WASAPI implementation.
4012 IAudioClient* captureAudioClient;
4013 IAudioClient* renderAudioClient;
4014 IAudioCaptureClient* captureClient;
4015 IAudioRenderClient* renderClient;
4016 HANDLE captureEvent;
4020 : captureAudioClient( NULL ),
4021 renderAudioClient( NULL ),
4022 captureClient( NULL ),
4023 renderClient( NULL ),
4024 captureEvent( NULL ),
4025 renderEvent( NULL ) {}
4028 //=============================================================================
4030 RtApiWasapi::RtApiWasapi()
4031 : coInitialized_( false ), deviceEnumerator_( NULL )
4033 // WASAPI can run either apartment or multi-threaded
4034 HRESULT hr = CoInitialize( NULL );
4035 if ( !FAILED( hr ) )
4036 coInitialized_ = true;
4038 // Instantiate device enumerator
4039 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4040 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4041 ( void** ) &deviceEnumerator_ );
4043 if ( FAILED( hr ) ) {
4044 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
4045 error( RtAudioError::DRIVER_ERROR );
4049 //-----------------------------------------------------------------------------
4051 RtApiWasapi::~RtApiWasapi()
4053 if ( stream_.state != STREAM_CLOSED )
4056 SAFE_RELEASE( deviceEnumerator_ );
4058 // If this object previously called CoInitialize()
4059 if ( coInitialized_ )
4063 //=============================================================================
4065 unsigned int RtApiWasapi::getDeviceCount( void )
4067 unsigned int captureDeviceCount = 0;
4068 unsigned int renderDeviceCount = 0;
4070 IMMDeviceCollection* captureDevices = NULL;
4071 IMMDeviceCollection* renderDevices = NULL;
4073 // Count capture devices
4075 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4076 if ( FAILED( hr ) ) {
4077 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4081 hr = captureDevices->GetCount( &captureDeviceCount );
4082 if ( FAILED( hr ) ) {
4083 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4087 // Count render devices
4088 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4089 if ( FAILED( hr ) ) {
4090 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4094 hr = renderDevices->GetCount( &renderDeviceCount );
4095 if ( FAILED( hr ) ) {
4096 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4101 // release all references
4102 SAFE_RELEASE( captureDevices );
4103 SAFE_RELEASE( renderDevices );
4105 if ( errorText_.empty() )
4106 return captureDeviceCount + renderDeviceCount;
4108 error( RtAudioError::DRIVER_ERROR );
4112 //-----------------------------------------------------------------------------
4114 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4116 RtAudio::DeviceInfo info;
4117 unsigned int captureDeviceCount = 0;
4118 unsigned int renderDeviceCount = 0;
4119 std::string defaultDeviceName;
4120 bool isCaptureDevice = false;
4122 PROPVARIANT deviceNameProp;
4123 PROPVARIANT defaultDeviceNameProp;
4125 IMMDeviceCollection* captureDevices = NULL;
4126 IMMDeviceCollection* renderDevices = NULL;
4127 IMMDevice* devicePtr = NULL;
4128 IMMDevice* defaultDevicePtr = NULL;
4129 IAudioClient* audioClient = NULL;
4130 IPropertyStore* devicePropStore = NULL;
4131 IPropertyStore* defaultDevicePropStore = NULL;
4133 WAVEFORMATEX* deviceFormat = NULL;
4134 WAVEFORMATEX* closestMatchFormat = NULL;
4137 info.probed = false;
4139 // Count capture devices
4141 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4142 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4143 if ( FAILED( hr ) ) {
4144 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4148 hr = captureDevices->GetCount( &captureDeviceCount );
4149 if ( FAILED( hr ) ) {
4150 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4154 // Count render devices
4155 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4156 if ( FAILED( hr ) ) {
4157 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4161 hr = renderDevices->GetCount( &renderDeviceCount );
4162 if ( FAILED( hr ) ) {
4163 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4167 // validate device index
4168 if ( device >= captureDeviceCount + renderDeviceCount ) {
4169 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4170 errorType = RtAudioError::INVALID_USE;
4174 // determine whether index falls within capture or render devices
4175 if ( device >= renderDeviceCount ) {
4176 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4177 if ( FAILED( hr ) ) {
4178 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4181 isCaptureDevice = true;
4184 hr = renderDevices->Item( device, &devicePtr );
4185 if ( FAILED( hr ) ) {
4186 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4189 isCaptureDevice = false;
4192 // get default device name
4193 if ( isCaptureDevice ) {
4194 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4195 if ( FAILED( hr ) ) {
4196 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4201 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4202 if ( FAILED( hr ) ) {
4203 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4208 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4209 if ( FAILED( hr ) ) {
4210 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4213 PropVariantInit( &defaultDeviceNameProp );
4215 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4216 if ( FAILED( hr ) ) {
4217 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4221 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4224 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4225 if ( FAILED( hr ) ) {
4226 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4230 PropVariantInit( &deviceNameProp );
4232 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4233 if ( FAILED( hr ) ) {
4234 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4238 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4241 if ( isCaptureDevice ) {
4242 info.isDefaultInput = info.name == defaultDeviceName;
4243 info.isDefaultOutput = false;
4246 info.isDefaultInput = false;
4247 info.isDefaultOutput = info.name == defaultDeviceName;
4251 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4252 if ( FAILED( hr ) ) {
4253 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4257 hr = audioClient->GetMixFormat( &deviceFormat );
4258 if ( FAILED( hr ) ) {
4259 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4263 if ( isCaptureDevice ) {
4264 info.inputChannels = deviceFormat->nChannels;
4265 info.outputChannels = 0;
4266 info.duplexChannels = 0;
4269 info.inputChannels = 0;
4270 info.outputChannels = deviceFormat->nChannels;
4271 info.duplexChannels = 0;
4274 // sample rates (WASAPI only supports the one native sample rate)
4275 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4277 info.sampleRates.clear();
4278 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
4281 info.nativeFormats = 0;
4283 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4284 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4285 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4287 if ( deviceFormat->wBitsPerSample == 32 ) {
4288 info.nativeFormats |= RTAUDIO_FLOAT32;
4290 else if ( deviceFormat->wBitsPerSample == 64 ) {
4291 info.nativeFormats |= RTAUDIO_FLOAT64;
4294 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4295 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4296 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4298 if ( deviceFormat->wBitsPerSample == 8 ) {
4299 info.nativeFormats |= RTAUDIO_SINT8;
4301 else if ( deviceFormat->wBitsPerSample == 16 ) {
4302 info.nativeFormats |= RTAUDIO_SINT16;
4304 else if ( deviceFormat->wBitsPerSample == 24 ) {
4305 info.nativeFormats |= RTAUDIO_SINT24;
4307 else if ( deviceFormat->wBitsPerSample == 32 ) {
4308 info.nativeFormats |= RTAUDIO_SINT32;
4316 // release all references
4317 PropVariantClear( &deviceNameProp );
4318 PropVariantClear( &defaultDeviceNameProp );
4320 SAFE_RELEASE( captureDevices );
4321 SAFE_RELEASE( renderDevices );
4322 SAFE_RELEASE( devicePtr );
4323 SAFE_RELEASE( defaultDevicePtr );
4324 SAFE_RELEASE( audioClient );
4325 SAFE_RELEASE( devicePropStore );
4326 SAFE_RELEASE( defaultDevicePropStore );
4328 CoTaskMemFree( deviceFormat );
4329 CoTaskMemFree( closestMatchFormat );
4331 if ( !errorText_.empty() )
4336 //-----------------------------------------------------------------------------
4338 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4340 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4341 if ( getDeviceInfo( i ).isDefaultOutput ) {
4349 //-----------------------------------------------------------------------------
4351 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4353 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4354 if ( getDeviceInfo( i ).isDefaultInput ) {
4362 //-----------------------------------------------------------------------------
4364 void RtApiWasapi::closeStream( void )
4366 if ( stream_.state == STREAM_CLOSED ) {
4367 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4368 error( RtAudioError::WARNING );
4372 if ( stream_.state != STREAM_STOPPED )
4375 // clean up stream memory
4376 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4377 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4379 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4380 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4382 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4383 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4385 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4386 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4388 delete ( WasapiHandle* ) stream_.apiHandle;
4389 stream_.apiHandle = NULL;
4391 for ( int i = 0; i < 2; i++ ) {
4392 if ( stream_.userBuffer[i] ) {
4393 free( stream_.userBuffer[i] );
4394 stream_.userBuffer[i] = 0;
4398 if ( stream_.deviceBuffer ) {
4399 free( stream_.deviceBuffer );
4400 stream_.deviceBuffer = 0;
4403 // update stream state
4404 stream_.state = STREAM_CLOSED;
4407 //-----------------------------------------------------------------------------
4409 void RtApiWasapi::startStream( void )
4413 if ( stream_.state == STREAM_RUNNING ) {
4414 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4415 error( RtAudioError::WARNING );
4419 // update stream state
4420 stream_.state = STREAM_RUNNING;
4422 // create WASAPI stream thread
4423 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4425 if ( !stream_.callbackInfo.thread ) {
4426 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4427 error( RtAudioError::THREAD_ERROR );
4430 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4431 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4435 //-----------------------------------------------------------------------------
4437 void RtApiWasapi::stopStream( void )
4441 if ( stream_.state == STREAM_STOPPED ) {
4442 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4443 error( RtAudioError::WARNING );
4447 // inform stream thread by setting stream state to STREAM_STOPPING
4448 stream_.state = STREAM_STOPPING;
4450 // wait until stream thread is stopped
4451 while( stream_.state != STREAM_STOPPED ) {
4455 // Wait for the last buffer to play before stopping.
4456 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4458 // stop capture client if applicable
4459 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4460 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4461 if ( FAILED( hr ) ) {
4462 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4463 error( RtAudioError::DRIVER_ERROR );
4468 // stop render client if applicable
4469 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4470 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4471 if ( FAILED( hr ) ) {
4472 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4473 error( RtAudioError::DRIVER_ERROR );
4478 // close thread handle
4479 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4480 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4481 error( RtAudioError::THREAD_ERROR );
4485 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4488 //-----------------------------------------------------------------------------
4490 void RtApiWasapi::abortStream( void )
4494 if ( stream_.state == STREAM_STOPPED ) {
4495 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4496 error( RtAudioError::WARNING );
4500 // inform stream thread by setting stream state to STREAM_STOPPING
4501 stream_.state = STREAM_STOPPING;
4503 // wait until stream thread is stopped
4504 while ( stream_.state != STREAM_STOPPED ) {
4508 // stop capture client if applicable
4509 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4510 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4511 if ( FAILED( hr ) ) {
4512 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4513 error( RtAudioError::DRIVER_ERROR );
4518 // stop render client if applicable
4519 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4520 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4521 if ( FAILED( hr ) ) {
4522 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4523 error( RtAudioError::DRIVER_ERROR );
4528 // close thread handle
4529 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4530 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4531 error( RtAudioError::THREAD_ERROR );
4535 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4538 //-----------------------------------------------------------------------------
4540 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4541 unsigned int firstChannel, unsigned int sampleRate,
4542 RtAudioFormat format, unsigned int* bufferSize,
4543 RtAudio::StreamOptions* options )
4545 bool methodResult = FAILURE;
4546 unsigned int captureDeviceCount = 0;
4547 unsigned int renderDeviceCount = 0;
4549 IMMDeviceCollection* captureDevices = NULL;
4550 IMMDeviceCollection* renderDevices = NULL;
4551 IMMDevice* devicePtr = NULL;
4552 WAVEFORMATEX* deviceFormat = NULL;
4553 unsigned int bufferBytes;
4554 stream_.state = STREAM_STOPPED;
4555 RtAudio::DeviceInfo deviceInfo;
4557 // create API Handle if not already created
4558 if ( !stream_.apiHandle )
4559 stream_.apiHandle = ( void* ) new WasapiHandle();
4561 // Count capture devices
4563 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4564 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4565 if ( FAILED( hr ) ) {
4566 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4570 hr = captureDevices->GetCount( &captureDeviceCount );
4571 if ( FAILED( hr ) ) {
4572 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4576 // Count render devices
4577 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4578 if ( FAILED( hr ) ) {
4579 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4583 hr = renderDevices->GetCount( &renderDeviceCount );
4584 if ( FAILED( hr ) ) {
4585 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4589 // validate device index
4590 if ( device >= captureDeviceCount + renderDeviceCount ) {
4591 errorType = RtAudioError::INVALID_USE;
4592 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4596 deviceInfo = getDeviceInfo( device );
4598 // validate sample rate
4599 if ( sampleRate != deviceInfo.preferredSampleRate )
4601 errorType = RtAudioError::INVALID_USE;
4602 std::stringstream ss;
4603 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4604 << "Hz sample rate not supported. This device only supports "
4605 << deviceInfo.preferredSampleRate << "Hz.";
4606 errorText_ = ss.str();
4610 // determine whether index falls within capture or render devices
4611 if ( device >= renderDeviceCount ) {
4612 if ( mode != INPUT ) {
4613 errorType = RtAudioError::INVALID_USE;
4614 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4618 // retrieve captureAudioClient from devicePtr
4619 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4621 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4622 if ( FAILED( hr ) ) {
4623 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4627 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4628 NULL, ( void** ) &captureAudioClient );
4629 if ( FAILED( hr ) ) {
4630 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4634 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4635 if ( FAILED( hr ) ) {
4636 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4640 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4641 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4644 if ( mode != OUTPUT ) {
4645 errorType = RtAudioError::INVALID_USE;
4646 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4650 // retrieve renderAudioClient from devicePtr
4651 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4653 hr = renderDevices->Item( device, &devicePtr );
4654 if ( FAILED( hr ) ) {
4655 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4659 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4660 NULL, ( void** ) &renderAudioClient );
4661 if ( FAILED( hr ) ) {
4662 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4666 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4667 if ( FAILED( hr ) ) {
4668 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4672 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4673 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4677 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4678 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4679 stream_.mode = DUPLEX;
4682 stream_.mode = mode;
4685 stream_.device[mode] = device;
4686 stream_.doByteSwap[mode] = false;
4687 stream_.sampleRate = sampleRate;
4688 stream_.bufferSize = *bufferSize;
4689 stream_.nBuffers = 1;
4690 stream_.nUserChannels[mode] = channels;
4691 stream_.channelOffset[mode] = firstChannel;
4692 stream_.userFormat = format;
4693 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4695 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4696 stream_.userInterleaved = false;
4698 stream_.userInterleaved = true;
4699 stream_.deviceInterleaved[mode] = true;
4701 // Set flags for buffer conversion.
4702 stream_.doConvertBuffer[mode] = false;
4703 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4704 stream_.nUserChannels != stream_.nDeviceChannels )
4705 stream_.doConvertBuffer[mode] = true;
4706 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4707 stream_.nUserChannels[mode] > 1 )
4708 stream_.doConvertBuffer[mode] = true;
4710 if ( stream_.doConvertBuffer[mode] )
4711 setConvertInfo( mode, 0 );
4713 // Allocate necessary internal buffers
4714 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4716 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4717 if ( !stream_.userBuffer[mode] ) {
4718 errorType = RtAudioError::MEMORY_ERROR;
4719 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4723 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4724 stream_.callbackInfo.priority = 15;
4726 stream_.callbackInfo.priority = 0;
4728 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4729 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4731 methodResult = SUCCESS;
4735 SAFE_RELEASE( captureDevices );
4736 SAFE_RELEASE( renderDevices );
4737 SAFE_RELEASE( devicePtr );
4738 CoTaskMemFree( deviceFormat );
4740 // if method failed, close the stream
4741 if ( methodResult == FAILURE )
4744 if ( !errorText_.empty() )
4746 return methodResult;
4749 //=============================================================================
4751 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4754 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4759 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4762 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4767 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4770 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4775 //-----------------------------------------------------------------------------
4777 void RtApiWasapi::wasapiThread()
4779 // as this is a new thread, we must CoInitialize it
4780 CoInitialize( NULL );
4784 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4785 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4786 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4787 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4788 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4789 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4791 WAVEFORMATEX* captureFormat = NULL;
4792 WAVEFORMATEX* renderFormat = NULL;
4793 WasapiBuffer captureBuffer;
4794 WasapiBuffer renderBuffer;
4796 // declare local stream variables
4797 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4798 BYTE* streamBuffer = NULL;
4799 unsigned long captureFlags = 0;
4800 unsigned int bufferFrameCount = 0;
4801 unsigned int numFramesPadding = 0;
4802 bool callbackPushed = false;
4803 bool callbackPulled = false;
4804 bool callbackStopped = false;
4805 int callbackResult = 0;
4807 unsigned int deviceBuffSize = 0;
4810 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4812 // Attempt to assign "Pro Audio" characteristic to thread
4813 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4815 DWORD taskIndex = 0;
4816 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4817 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4818 FreeLibrary( AvrtDll );
4821 // start capture stream if applicable
4822 if ( captureAudioClient ) {
4823 hr = captureAudioClient->GetMixFormat( &captureFormat );
4824 if ( FAILED( hr ) ) {
4825 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4829 // initialize capture stream according to desire buffer size
4830 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4832 if ( !captureClient ) {
4833 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4834 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4835 desiredBufferPeriod,
4836 desiredBufferPeriod,
4839 if ( FAILED( hr ) ) {
4840 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4844 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4845 ( void** ) &captureClient );
4846 if ( FAILED( hr ) ) {
4847 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4851 // configure captureEvent to trigger on every available capture buffer
4852 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4853 if ( !captureEvent ) {
4854 errorType = RtAudioError::SYSTEM_ERROR;
4855 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4859 hr = captureAudioClient->SetEventHandle( captureEvent );
4860 if ( FAILED( hr ) ) {
4861 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4865 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4866 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4869 unsigned int inBufferSize = 0;
4870 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4871 if ( FAILED( hr ) ) {
4872 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4876 // scale outBufferSize according to stream->user sample rate ratio
4877 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4878 inBufferSize *= stream_.nDeviceChannels[INPUT];
4880 // set captureBuffer size
4881 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4883 // reset the capture stream
4884 hr = captureAudioClient->Reset();
4885 if ( FAILED( hr ) ) {
4886 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4890 // start the capture stream
4891 hr = captureAudioClient->Start();
4892 if ( FAILED( hr ) ) {
4893 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4898 // start render stream if applicable
4899 if ( renderAudioClient ) {
4900 hr = renderAudioClient->GetMixFormat( &renderFormat );
4901 if ( FAILED( hr ) ) {
4902 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4906 // initialize render stream according to desired buffer size
4907 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4909 if ( !renderClient ) {
4910 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4911 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4912 desiredBufferPeriod,
4913 desiredBufferPeriod,
4916 if ( FAILED( hr ) ) {
4917 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4921 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4922 ( void** ) &renderClient );
4923 if ( FAILED( hr ) ) {
4924 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4928 // configure renderEvent to trigger on every available render buffer
4929 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4930 if ( !renderEvent ) {
4931 errorType = RtAudioError::SYSTEM_ERROR;
4932 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4936 hr = renderAudioClient->SetEventHandle( renderEvent );
4937 if ( FAILED( hr ) ) {
4938 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4942 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4943 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4946 unsigned int outBufferSize = 0;
4947 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4948 if ( FAILED( hr ) ) {
4949 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4953 // scale inBufferSize according to user->stream sample rate ratio
4954 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4955 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4957 // set renderBuffer size
4958 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4960 // reset the render stream
4961 hr = renderAudioClient->Reset();
4962 if ( FAILED( hr ) ) {
4963 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4967 // start the render stream
4968 hr = renderAudioClient->Start();
4969 if ( FAILED( hr ) ) {
4970 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4975 if ( stream_.mode == INPUT ) {
4976 using namespace std; // for roundf
4977 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4979 else if ( stream_.mode == OUTPUT ) {
4980 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4982 else if ( stream_.mode == DUPLEX ) {
4983 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4984 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4987 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4988 if ( !stream_.deviceBuffer ) {
4989 errorType = RtAudioError::MEMORY_ERROR;
4990 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4994 // stream process loop
4995 while ( stream_.state != STREAM_STOPPING ) {
4996 if ( !callbackPulled ) {
4999 // 1. Pull callback buffer from inputBuffer
5000 // 2. If 1. was successful: Convert callback buffer to user format
5002 if ( captureAudioClient ) {
5003 // Pull callback buffer from inputBuffer
5004 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
5005 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
5006 stream_.deviceFormat[INPUT] );
5008 if ( callbackPulled ) {
5009 if ( stream_.doConvertBuffer[INPUT] ) {
5010 // Convert callback buffer to user format
5011 convertBuffer( stream_.userBuffer[INPUT],
5012 stream_.deviceBuffer,
5013 stream_.convertInfo[INPUT] );
5016 // no further conversion, simply copy deviceBuffer to userBuffer
5017 memcpy( stream_.userBuffer[INPUT],
5018 stream_.deviceBuffer,
5019 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5024 // if there is no capture stream, set callbackPulled flag
5025 callbackPulled = true;
5030 // 1. Execute user callback method
5031 // 2. Handle return value from callback
5033 // if callback has not requested the stream to stop
5034 if ( callbackPulled && !callbackStopped ) {
5035 // Execute user callback method
5036 callbackResult = callback( stream_.userBuffer[OUTPUT],
5037 stream_.userBuffer[INPUT],
5040 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5041 stream_.callbackInfo.userData );
5043 // Handle return value from callback
5044 if ( callbackResult == 1 ) {
5045 // instantiate a thread to stop this thread
5046 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5047 if ( !threadHandle ) {
5048 errorType = RtAudioError::THREAD_ERROR;
5049 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5052 else if ( !CloseHandle( threadHandle ) ) {
5053 errorType = RtAudioError::THREAD_ERROR;
5054 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5058 callbackStopped = true;
5060 else if ( callbackResult == 2 ) {
5061 // instantiate a thread to stop this thread
5062 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5063 if ( !threadHandle ) {
5064 errorType = RtAudioError::THREAD_ERROR;
5065 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5068 else if ( !CloseHandle( threadHandle ) ) {
5069 errorType = RtAudioError::THREAD_ERROR;
5070 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5074 callbackStopped = true;
5081 // 1. Convert callback buffer to stream format
5082 // 2. Push callback buffer into outputBuffer
5084 if ( renderAudioClient && callbackPulled ) {
5085 if ( stream_.doConvertBuffer[OUTPUT] ) {
5086 // Convert callback buffer to stream format
5087 convertBuffer( stream_.deviceBuffer,
5088 stream_.userBuffer[OUTPUT],
5089 stream_.convertInfo[OUTPUT] );
5093 // Push callback buffer into outputBuffer
5094 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
5095 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
5096 stream_.deviceFormat[OUTPUT] );
5099 // if there is no render stream, set callbackPushed flag
5100 callbackPushed = true;
5105 // 1. Get capture buffer from stream
5106 // 2. Push capture buffer into inputBuffer
5107 // 3. If 2. was successful: Release capture buffer
5109 if ( captureAudioClient ) {
5110 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5111 if ( !callbackPulled ) {
5112 WaitForSingleObject( captureEvent, INFINITE );
5115 // Get capture buffer from stream
5116 hr = captureClient->GetBuffer( &streamBuffer,
5118 &captureFlags, NULL, NULL );
5119 if ( FAILED( hr ) ) {
5120 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5124 if ( bufferFrameCount != 0 ) {
5125 // Push capture buffer into inputBuffer
5126 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5127 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5128 stream_.deviceFormat[INPUT] ) )
5130 // Release capture buffer
5131 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5132 if ( FAILED( hr ) ) {
5133 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5139 // Inform WASAPI that capture was unsuccessful
5140 hr = captureClient->ReleaseBuffer( 0 );
5141 if ( FAILED( hr ) ) {
5142 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5149 // Inform WASAPI that capture was unsuccessful
5150 hr = captureClient->ReleaseBuffer( 0 );
5151 if ( FAILED( hr ) ) {
5152 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5160 // 1. Get render buffer from stream
5161 // 2. Pull next buffer from outputBuffer
5162 // 3. If 2. was successful: Fill render buffer with next buffer
5163 // Release render buffer
5165 if ( renderAudioClient ) {
5166 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5167 if ( callbackPulled && !callbackPushed ) {
5168 WaitForSingleObject( renderEvent, INFINITE );
5171 // Get render buffer from stream
5172 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5173 if ( FAILED( hr ) ) {
5174 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5178 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5179 if ( FAILED( hr ) ) {
5180 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5184 bufferFrameCount -= numFramesPadding;
5186 if ( bufferFrameCount != 0 ) {
5187 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5188 if ( FAILED( hr ) ) {
5189 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5193 // Pull next buffer from outputBuffer
5194 // Fill render buffer with next buffer
5195 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5196 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5197 stream_.deviceFormat[OUTPUT] ) )
5199 // Release render buffer
5200 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5201 if ( FAILED( hr ) ) {
5202 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5208 // Inform WASAPI that render was unsuccessful
5209 hr = renderClient->ReleaseBuffer( 0, 0 );
5210 if ( FAILED( hr ) ) {
5211 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5218 // Inform WASAPI that render was unsuccessful
5219 hr = renderClient->ReleaseBuffer( 0, 0 );
5220 if ( FAILED( hr ) ) {
5221 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5227 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5228 if ( callbackPushed ) {
5229 callbackPulled = false;
5231 RtApi::tickStreamTime();
5238 CoTaskMemFree( captureFormat );
5239 CoTaskMemFree( renderFormat );
5243 // update stream state
5244 stream_.state = STREAM_STOPPED;
5246 if ( errorText_.empty() )
5252 //******************** End of __WINDOWS_WASAPI__ *********************//
5256 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5258 // Modified by Robin Davies, October 2005
5259 // - Improvements to DirectX pointer chasing.
5260 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5261 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5262 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5263 // Changed device query structure for RtAudio 4.0.7, January 2010
5265 #include <mmsystem.h>
5269 #include <algorithm>
5271 #if defined(__MINGW32__)
5272 // missing from latest mingw winapi
5273 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5274 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5275 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5276 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5279 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5281 #ifdef _MSC_VER // if Microsoft Visual C++
5282 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5285 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5287 if ( pointer > bufferSize ) pointer -= bufferSize;
5288 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5289 if ( pointer < earlierPointer ) pointer += bufferSize;
5290 return pointer >= earlierPointer && pointer < laterPointer;
5293 // A structure to hold various information related to the DirectSound
5294 // API implementation.
// NOTE(review): this excerpt omits the "struct DsHandle {" header line and
// several members (the constructor below initializes id[], buffer[] and
// xrun[] fields that are not shown here) -- confirm against the full source.
5296 unsigned int drainCounter; // Tracks callback counts when draining
5297 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction state: index 0 is playback, index 1 is capture (presumably,
// matching the two-element arrays used throughout this API -- verify).
5301 UINT bufferPointer[2];
5302 DWORD dsBufferSize[2];
5303 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the counters and per-direction bookkeeping.
5307 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5310 // Declarations for utility functions, callbacks, and structures
5311 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate below.
// NOTE(review): the remaining parameters and closing parenthesis of this
// declaration are missing from this excerpt.
5312 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5313 LPCTSTR description,
// Maps a DirectSound HRESULT to a human-readable string (defined later).
5317 static const char* getErrorString( int code );
// Entry point for the DirectSound callback thread (defined later).
5319 static unsigned __stdcall callbackHandler( void *ptr );
// Constructor fragment of struct DsDevice; the struct's header and member
// declarations are not visible in this excerpt -- confirm against the
// full source.
5328 : found(false) { validId[0] = false; validId[1] = false; }
// Bundles the enumeration direction with the device list so both can be
// passed through the single user-data pointer of deviceQueryCallback.
5331 struct DsProbeData {
5333 std::vector<struct DsDevice>* dsDevices;
5336 RtApiDs :: RtApiDs()
5338 // Dsound will run both-threaded. If CoInitialize fails, then just
5339 // accept whatever the mainline chose for a threading model.
5340 coInitialized_ = false;
5341 HRESULT hr = CoInitialize( NULL );
5342 if ( !FAILED( hr ) ) coInitialized_ = true;
5345 RtApiDs :: ~RtApiDs()
5347 if ( stream_.state != STREAM_CLOSED ) closeStream();
5348 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5351 // The DirectSound default output is always the first device.
// NOTE(review): the function body is elided from this excerpt (presumably
// it returns index 0, per the comment above -- confirm in the full source).
5352 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5357 // The DirectSound default input is always the first input device,
5358 // which is the first capture device enumerated.
// NOTE(review): body elided from this excerpt as well.
5359 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Enumerate DirectSound playback and capture devices, prune entries that
// have disappeared since the previous query, and return how many remain.
// Enumeration failures are reported as warnings, not fatal errors.
// NOTE(review): several lines (function braces, else-branches that mark
// devices as found, the loop increment) are missing from this excerpt --
// verify against the canonical source.
5364 unsigned int RtApiDs :: getDeviceCount( void )
5366 // Set query flag for previously found devices to false, so that we
5367 // can check for any devices that have disappeared.
5368 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5369 dsDevices[i].found = false;
5371 // Query DirectSound devices.
5372 struct DsProbeData probeInfo;
5373 probeInfo.isInput = false;
5374 probeInfo.dsDevices = &dsDevices;
5375 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5376 if ( FAILED( result ) ) {
5377 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5378 errorText_ = errorStream_.str();
5379 error( RtAudioError::WARNING );
5382 // Query DirectSoundCapture devices.
5383 probeInfo.isInput = true;
5384 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5385 if ( FAILED( result ) ) {
5386 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5387 errorText_ = errorStream_.str();
5388 error( RtAudioError::WARNING );
5391 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5392 for ( unsigned int i=0; i<dsDevices.size(); ) {
5393 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5397 return static_cast<unsigned int>(dsDevices.size());
// Probe a single DirectSound device: open it for output and/or input,
// query its capabilities (channels, sample rates, native sample formats),
// and return the populated RtAudio::DeviceInfo.  Probe failures are
// reported as warnings and return a partially filled structure.
// NOTE(review): this excerpt drops many lines -- local declarations of
// "result", "outCaps" and "inCaps", the "probeInput:" label targeted by the
// goto below, and numerous closing braces.  Verify against the full source.
5400 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5402 RtAudio::DeviceInfo info;
5403 info.probed = false;
5405 if ( dsDevices.size() == 0 ) {
5406 // Force a query of all devices
5408 if ( dsDevices.size() == 0 ) {
5409 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5410 error( RtAudioError::INVALID_USE );
5415 if ( device >= dsDevices.size() ) {
5416 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5417 error( RtAudioError::INVALID_USE );
// Skip the output probe entirely when the device has no valid playback id.
5422 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5424 LPDIRECTSOUND output;
5426 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5427 if ( FAILED( result ) ) {
5428 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5429 errorText_ = errorStream_.str();
5430 error( RtAudioError::WARNING );
5434 outCaps.dwSize = sizeof( outCaps );
5435 result = output->GetCaps( &outCaps );
5436 if ( FAILED( result ) ) {
5438 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5439 errorText_ = errorStream_.str();
5440 error( RtAudioError::WARNING );
5444 // Get output channel information.
5445 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5447 // Get sample rate information.
5448 info.sampleRates.clear();
// Keep every globally supported rate that falls inside the device's
// secondary-buffer range; prefer the highest rate at or below 48 kHz.
5449 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5450 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5451 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5452 info.sampleRates.push_back( SAMPLE_RATES[k] );
5454 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5455 info.preferredSampleRate = SAMPLE_RATES[k];
5459 // Get format information.
5460 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5461 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5465 if ( getDefaultOutputDevice() == device )
5466 info.isDefaultOutput = true;
// No valid capture id: record the name and return with output info only.
5468 if ( dsDevices[ device ].validId[1] == false ) {
5469 info.name = dsDevices[ device ].name;
5476 LPDIRECTSOUNDCAPTURE input;
5477 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5478 if ( FAILED( result ) ) {
5479 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5480 errorText_ = errorStream_.str();
5481 error( RtAudioError::WARNING );
5486 inCaps.dwSize = sizeof( inCaps );
5487 result = input->GetCaps( &inCaps );
5488 if ( FAILED( result ) ) {
5490 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5491 errorText_ = errorStream_.str();
5492 error( RtAudioError::WARNING );
5496 // Get input channel information.
5497 info.inputChannels = inCaps.dwChannels;
5499 // Get sample rate and format information.
// Capture capabilities are reported via WAVE_FORMAT_* bit flags; decode
// them separately for stereo (xS..) and mono (xM..) devices.
5500 std::vector<unsigned int> rates;
5501 if ( inCaps.dwChannels >= 2 ) {
5502 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5503 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5504 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5505 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5506 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5507 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5508 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5509 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5511 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5512 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5513 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5514 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5515 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5517 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5518 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5519 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5520 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5521 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5524 else if ( inCaps.dwChannels == 1 ) {
5525 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5526 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5527 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5528 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5529 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5530 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5531 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5532 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5534 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5535 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5536 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5537 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5538 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5540 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5541 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5542 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5543 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5544 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5547 else info.inputChannels = 0; // technically, this would be an error
5551 if ( info.inputChannels == 0 ) return info;
5553 // Copy the supported rates to the info structure but avoid duplication.
5555 for ( unsigned int i=0; i<rates.size(); i++ ) {
5557 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5558 if ( rates[i] == info.sampleRates[j] ) {
5563 if ( found == false ) info.sampleRates.push_back( rates[i] );
5565 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5567 // If device opens for both playback and capture, we determine the channels.
5568 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5569 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5571 if ( device == 0 ) info.isDefaultInput = true;
5573 // Copy name and return.
5574 info.name = dsDevices[ device ].name;
5579 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5580 unsigned int firstChannel, unsigned int sampleRate,
5581 RtAudioFormat format, unsigned int *bufferSize,
5582 RtAudio::StreamOptions *options )
5584 if ( channels + firstChannel > 2 ) {
5585 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5589 size_t nDevices = dsDevices.size();
5590 if ( nDevices == 0 ) {
5591 // This should not happen because a check is made before this function is called.
5592 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5596 if ( device >= nDevices ) {
5597 // This should not happen because a check is made before this function is called.
5598 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5602 if ( mode == OUTPUT ) {
5603 if ( dsDevices[ device ].validId[0] == false ) {
5604 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5605 errorText_ = errorStream_.str();
5609 else { // mode == INPUT
5610 if ( dsDevices[ device ].validId[1] == false ) {
5611 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5612 errorText_ = errorStream_.str();
5617 // According to a note in PortAudio, using GetDesktopWindow()
5618 // instead of GetForegroundWindow() is supposed to avoid problems
5619 // that occur when the application's window is not the foreground
5620 // window. Also, if the application window closes before the
5621 // DirectSound buffer, DirectSound can crash. In the past, I had
5622 // problems when using GetDesktopWindow() but it seems fine now
5623 // (January 2010). I'll leave it commented here.
5624 // HWND hWnd = GetForegroundWindow();
5625 HWND hWnd = GetDesktopWindow();
5627 // Check the numberOfBuffers parameter and limit the lowest value to
5628 // two. This is a judgement call and a value of two is probably too
5629 // low for capture, but it should work for playback.
5631 if ( options ) nBuffers = options->numberOfBuffers;
5632 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5633 if ( nBuffers < 2 ) nBuffers = 3;
5635 // Check the lower range of the user-specified buffer size and set
5636 // (arbitrarily) to a lower bound of 32.
5637 if ( *bufferSize < 32 ) *bufferSize = 32;
5639 // Create the wave format structure. The data format setting will
5640 // be determined later.
5641 WAVEFORMATEX waveFormat;
5642 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5643 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5644 waveFormat.nChannels = channels + firstChannel;
5645 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5647 // Determine the device buffer size. By default, we'll use the value
5648 // defined above (32K), but we will grow it to make allowances for
5649 // very large software buffer sizes.
5650 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5651 DWORD dsPointerLeadTime = 0;
5653 void *ohandle = 0, *bhandle = 0;
5655 if ( mode == OUTPUT ) {
5657 LPDIRECTSOUND output;
5658 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5659 if ( FAILED( result ) ) {
5660 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5661 errorText_ = errorStream_.str();
5666 outCaps.dwSize = sizeof( outCaps );
5667 result = output->GetCaps( &outCaps );
5668 if ( FAILED( result ) ) {
5670 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5671 errorText_ = errorStream_.str();
5675 // Check channel information.
5676 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5677 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5678 errorText_ = errorStream_.str();
5682 // Check format information. Use 16-bit format unless not
5683 // supported or user requests 8-bit.
5684 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5685 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5686 waveFormat.wBitsPerSample = 16;
5687 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5690 waveFormat.wBitsPerSample = 8;
5691 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5693 stream_.userFormat = format;
5695 // Update wave format structure and buffer information.
5696 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5697 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5698 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5700 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5701 while ( dsPointerLeadTime * 2U > dsBufferSize )
5704 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5705 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5706 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5707 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5708 if ( FAILED( result ) ) {
5710 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5711 errorText_ = errorStream_.str();
5715 // Even though we will write to the secondary buffer, we need to
5716 // access the primary buffer to set the correct output format
5717 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5718 // buffer description.
5719 DSBUFFERDESC bufferDescription;
5720 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5721 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5722 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5724 // Obtain the primary buffer
5725 LPDIRECTSOUNDBUFFER buffer;
5726 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5727 if ( FAILED( result ) ) {
5729 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5730 errorText_ = errorStream_.str();
5734 // Set the primary DS buffer sound format.
5735 result = buffer->SetFormat( &waveFormat );
5736 if ( FAILED( result ) ) {
5738 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5739 errorText_ = errorStream_.str();
5743 // Setup the secondary DS buffer description.
5744 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5745 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5746 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5747 DSBCAPS_GLOBALFOCUS |
5748 DSBCAPS_GETCURRENTPOSITION2 |
5749 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5750 bufferDescription.dwBufferBytes = dsBufferSize;
5751 bufferDescription.lpwfxFormat = &waveFormat;
5753 // Try to create the secondary DS buffer. If that doesn't work,
5754 // try to use software mixing. Otherwise, there's a problem.
5755 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5756 if ( FAILED( result ) ) {
5757 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5758 DSBCAPS_GLOBALFOCUS |
5759 DSBCAPS_GETCURRENTPOSITION2 |
5760 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5761 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5762 if ( FAILED( result ) ) {
5764 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5765 errorText_ = errorStream_.str();
5770 // Get the buffer size ... might be different from what we specified.
5772 dsbcaps.dwSize = sizeof( DSBCAPS );
5773 result = buffer->GetCaps( &dsbcaps );
5774 if ( FAILED( result ) ) {
5777 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5778 errorText_ = errorStream_.str();
5782 dsBufferSize = dsbcaps.dwBufferBytes;
5784 // Lock the DS buffer
5787 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5788 if ( FAILED( result ) ) {
5791 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5792 errorText_ = errorStream_.str();
5796 // Zero the DS buffer
5797 ZeroMemory( audioPtr, dataLen );
5799 // Unlock the DS buffer
5800 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5801 if ( FAILED( result ) ) {
5804 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5805 errorText_ = errorStream_.str();
5809 ohandle = (void *) output;
5810 bhandle = (void *) buffer;
5813 if ( mode == INPUT ) {
5815 LPDIRECTSOUNDCAPTURE input;
5816 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5817 if ( FAILED( result ) ) {
5818 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5819 errorText_ = errorStream_.str();
5824 inCaps.dwSize = sizeof( inCaps );
5825 result = input->GetCaps( &inCaps );
5826 if ( FAILED( result ) ) {
5828 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5829 errorText_ = errorStream_.str();
5833 // Check channel information.
5834 if ( inCaps.dwChannels < channels + firstChannel ) {
5835 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5839 // Check format information. Use 16-bit format unless user
5841 DWORD deviceFormats;
5842 if ( channels + firstChannel == 2 ) {
5843 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5844 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5845 waveFormat.wBitsPerSample = 8;
5846 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5848 else { // assume 16-bit is supported
5849 waveFormat.wBitsPerSample = 16;
5850 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5853 else { // channel == 1
5854 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5855 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5856 waveFormat.wBitsPerSample = 8;
5857 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5859 else { // assume 16-bit is supported
5860 waveFormat.wBitsPerSample = 16;
5861 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5864 stream_.userFormat = format;
5866 // Update wave format structure and buffer information.
5867 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5868 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5869 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5871 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5872 while ( dsPointerLeadTime * 2U > dsBufferSize )
5875 // Setup the secondary DS buffer description.
5876 DSCBUFFERDESC bufferDescription;
5877 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5878 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5879 bufferDescription.dwFlags = 0;
5880 bufferDescription.dwReserved = 0;
5881 bufferDescription.dwBufferBytes = dsBufferSize;
5882 bufferDescription.lpwfxFormat = &waveFormat;
5884 // Create the capture buffer.
5885 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5886 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5887 if ( FAILED( result ) ) {
5889 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5890 errorText_ = errorStream_.str();
5894 // Get the buffer size ... might be different from what we specified.
5896 dscbcaps.dwSize = sizeof( DSCBCAPS );
5897 result = buffer->GetCaps( &dscbcaps );
5898 if ( FAILED( result ) ) {
5901 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5902 errorText_ = errorStream_.str();
5906 dsBufferSize = dscbcaps.dwBufferBytes;
5908 // NOTE: We could have a problem here if this is a duplex stream
5909 // and the play and capture hardware buffer sizes are different
5910 // (I'm actually not sure if that is a problem or not).
5911 // Currently, we are not verifying that.
5913 // Lock the capture buffer
5916 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5917 if ( FAILED( result ) ) {
5920 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5921 errorText_ = errorStream_.str();
5926 ZeroMemory( audioPtr, dataLen );
5928 // Unlock the buffer
5929 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5930 if ( FAILED( result ) ) {
5933 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5934 errorText_ = errorStream_.str();
5938 ohandle = (void *) input;
5939 bhandle = (void *) buffer;
5942 // Set various stream parameters
5943 DsHandle *handle = 0;
5944 stream_.nDeviceChannels[mode] = channels + firstChannel;
5945 stream_.nUserChannels[mode] = channels;
5946 stream_.bufferSize = *bufferSize;
5947 stream_.channelOffset[mode] = firstChannel;
5948 stream_.deviceInterleaved[mode] = true;
5949 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5950 else stream_.userInterleaved = true;
5952 // Set flag for buffer conversion
5953 stream_.doConvertBuffer[mode] = false;
5954 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5955 stream_.doConvertBuffer[mode] = true;
5956 if (stream_.userFormat != stream_.deviceFormat[mode])
5957 stream_.doConvertBuffer[mode] = true;
5958 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5959 stream_.nUserChannels[mode] > 1 )
5960 stream_.doConvertBuffer[mode] = true;
5962 // Allocate necessary internal buffers
5963 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5964 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5965 if ( stream_.userBuffer[mode] == NULL ) {
5966 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5970 if ( stream_.doConvertBuffer[mode] ) {
5972 bool makeBuffer = true;
5973 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5974 if ( mode == INPUT ) {
5975 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5976 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5977 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5982 bufferBytes *= *bufferSize;
5983 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5984 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5985 if ( stream_.deviceBuffer == NULL ) {
5986 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5992 // Allocate our DsHandle structures for the stream.
5993 if ( stream_.apiHandle == 0 ) {
5995 handle = new DsHandle;
5997 catch ( std::bad_alloc& ) {
5998 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6002 // Create a manual-reset event.
6003 handle->condition = CreateEvent( NULL, // no security
6004 TRUE, // manual-reset
6005 FALSE, // non-signaled initially
6007 stream_.apiHandle = (void *) handle;
6010 handle = (DsHandle *) stream_.apiHandle;
6011 handle->id[mode] = ohandle;
6012 handle->buffer[mode] = bhandle;
6013 handle->dsBufferSize[mode] = dsBufferSize;
6014 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6016 stream_.device[mode] = device;
6017 stream_.state = STREAM_STOPPED;
6018 if ( stream_.mode == OUTPUT && mode == INPUT )
6019 // We had already set up an output stream.
6020 stream_.mode = DUPLEX;
6022 stream_.mode = mode;
6023 stream_.nBuffers = nBuffers;
6024 stream_.sampleRate = sampleRate;
6026 // Setup the buffer conversion information structure.
6027 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6029 // Setup the callback thread.
6030 if ( stream_.callbackInfo.isRunning == false ) {
6032 stream_.callbackInfo.isRunning = true;
6033 stream_.callbackInfo.object = (void *) this;
6034 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6035 &stream_.callbackInfo, 0, &threadId );
6036 if ( stream_.callbackInfo.thread == 0 ) {
6037 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6041 // Boost DS thread priority
6042 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6048 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6049 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6050 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6051 if ( buffer ) buffer->Release();
6054 if ( handle->buffer[1] ) {
6055 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6056 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6057 if ( buffer ) buffer->Release();
6060 CloseHandle( handle->condition );
6062 stream_.apiHandle = 0;
6065 for ( int i=0; i<2; i++ ) {
6066 if ( stream_.userBuffer[i] ) {
6067 free( stream_.userBuffer[i] );
6068 stream_.userBuffer[i] = 0;
6072 if ( stream_.deviceBuffer ) {
6073 free( stream_.deviceBuffer );
6074 stream_.deviceBuffer = 0;
6077 stream_.state = STREAM_CLOSED;
6081 void RtApiDs :: closeStream()
// Tear down an open DirectSound stream: stop and join the callback
// thread, release the playback/capture DirectSound objects, close the
// signalling event, and free all internally allocated buffers.
// NOTE(review): this is a gappy numbered listing — some original lines
// (early return after the warning, Release() calls, closing braces)
// are not shown; confirm against the canonical RtAudio source.
6083 if ( stream_.state == STREAM_CLOSED ) {
6084 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6085 error( RtAudioError::WARNING );
6089 // Stop the callback thread.
6090 stream_.callbackInfo.isRunning = false;
// Wait for the callback thread to exit before closing its handle.
6091 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6092 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6094 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Index 0 = playback side; buffer may be non-NULL while the object is NULL.
6096 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6097 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6098 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Index 1 = capture side.
6105 if ( handle->buffer[1] ) {
6106 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6107 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6114 CloseHandle( handle->condition );
6116 stream_.apiHandle = 0;
// Free the per-mode user buffers and the shared conversion buffer.
6119 for ( int i=0; i<2; i++ ) {
6120 if ( stream_.userBuffer[i] ) {
6121 free( stream_.userBuffer[i] );
6122 stream_.userBuffer[i] = 0;
6126 if ( stream_.deviceBuffer ) {
6127 free( stream_.deviceBuffer );
6128 stream_.deviceBuffer = 0;
// Mark the stream fully closed.
6131 stream_.mode = UNINITIALIZED;
6132 stream_.state = STREAM_CLOSED;
6135 void RtApiDs :: startStream()
// Start an opened stream: kick the playback buffer into looping play
// and/or the capture buffer into looping capture, reset drain state,
// and mark the stream RUNNING. Emits a WARNING if already running and
// a SYSTEM_ERROR if any DirectSound call fails.
6138 if ( stream_.state == STREAM_RUNNING ) {
6139 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6140 error( RtAudioError::WARNING );
6144 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6146 // Increase scheduler frequency on lesser windows (a side-effect of
6147 // increasing timer accuracy). On greater windows (Win2K or later),
6148 // this is already in effect.
6149 timeBeginPeriod( 1 );
6151 buffersRolling = false;
6152 duplexPrerollBytes = 0;
6154 if ( stream_.mode == DUPLEX ) {
6155 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6156 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looping playback on the output (secondary) buffer.
6160 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6162 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6163 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6164 if ( FAILED( result ) ) {
6165 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6166 errorText_ = errorStream_.str();
// Start looping capture on the input buffer.
6171 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6173 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6174 result = buffer->Start( DSCBSTART_LOOPING );
6175 if ( FAILED( result ) ) {
6176 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6177 errorText_ = errorStream_.str();
// Reset drain bookkeeping and the stop-signalling event.
6182 handle->drainCounter = 0;
6183 handle->internalDrain = false;
6184 ResetEvent( handle->condition );
6185 stream_.state = STREAM_RUNNING;
6188 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6191 void RtApiDs :: stopStream()
// Stop a running stream. For output/duplex, first waits (via the
// handle's condition event) for the callback to finish draining, then
// stops the DS buffers, zeroes them so stale audio can't replay, and
// rewinds the internal buffer pointers to the start.
6194 if ( stream_.state == STREAM_STOPPED ) {
6195 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6196 error( RtAudioError::WARNING );
6203 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6204 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If no drain is in progress, request one and wait for the callback
// thread to signal that playback has drained.
6205 if ( handle->drainCounter == 0 ) {
6206 handle->drainCounter = 2;
6207 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6210 stream_.state = STREAM_STOPPED;
6212 MUTEX_LOCK( &stream_.mutex );
6214 // Stop the buffer and clear memory
6215 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6216 result = buffer->Stop();
6217 if ( FAILED( result ) ) {
6218 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6219 errorText_ = errorStream_.str();
6223 // Lock the buffer and clear it so that if we start to play again,
6224 // we won't have old data playing.
6225 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6226 if ( FAILED( result ) ) {
6227 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6228 errorText_ = errorStream_.str();
6232 // Zero the DS buffer
6233 ZeroMemory( audioPtr, dataLen );
6235 // Unlock the DS buffer
6236 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6237 if ( FAILED( result ) ) {
6238 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6239 errorText_ = errorStream_.str();
6243 // If we start playing again, we must begin at beginning of buffer.
6244 handle->bufferPointer[0] = 0;
// Capture side: stop, clear and rewind symmetrically. In DUPLEX mode
// the mutex is already held from the output branch above.
6247 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6248 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6252 stream_.state = STREAM_STOPPED;
6254 if ( stream_.mode != DUPLEX )
6255 MUTEX_LOCK( &stream_.mutex );
6257 result = buffer->Stop();
6258 if ( FAILED( result ) ) {
6259 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6260 errorText_ = errorStream_.str();
6264 // Lock the buffer and clear it so that if we start to play again,
6265 // we won't have old data playing.
6266 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6267 if ( FAILED( result ) ) {
6268 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6269 errorText_ = errorStream_.str();
6273 // Zero the DS buffer
6274 ZeroMemory( audioPtr, dataLen );
6276 // Unlock the DS buffer
6277 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6278 if ( FAILED( result ) ) {
6279 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6280 errorText_ = errorStream_.str();
6284 // If we start recording again, we must begin at beginning of buffer.
6285 handle->bufferPointer[1] = 0;
6289 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6290 MUTEX_UNLOCK( &stream_.mutex );
6292 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6295 void RtApiDs :: abortStream()
// Abort a running stream without draining: set the drain counter so
// the callback writes silence, then stop. Warns if already stopped.
// NOTE(review): listing is gappy — the early return after the warning
// and the follow-up stop call are among the lines not shown here.
6298 if ( stream_.state == STREAM_STOPPED ) {
6299 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6300 error( RtAudioError::WARNING );
6304 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// drainCounter >= 2 makes callbackEvent() output zeros instead of
// invoking the user callback.
6305 handle->drainCounter = 2;
6310 void RtApiDs :: callbackEvent()
// Per-iteration body of the DirectSound callback thread. Invokes the
// user callback for fresh output data (unless draining), then copies
// output into the circular DS playback buffer ahead of the safe-write
// pointer and/or pulls input from the DS capture buffer, handling
// pointer wrap-around, under/overrun resync, and duplex pre-roll.
//
// FIX(review): four GetCurrentPosition() calls in this block contained
// the mojibake token "¤t…Pointer" — an HTML-entity corruption of
// "&current…Pointer" (the variables are declared below at original
// lines 6366-6367). Restored the intended "&currentWritePointer" /
// "&currentReadPointer" arguments.
6312 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6313 Sleep( 50 ); // sleep 50 milliseconds
6317 if ( stream_.state == STREAM_CLOSED ) {
6318 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6319 error( RtAudioError::WARNING );
6323 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6324 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6326 // Check if we were draining the stream and signal is finished.
6327 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6329 stream_.state = STREAM_STOPPING;
// Only an externally requested drain (stopStream) is waiting on the event.
6330 if ( handle->internalDrain == false )
6331 SetEvent( handle->condition );
6337 // Invoke user callback to get fresh output data UNLESS we are
6339 if ( handle->drainCounter == 0 ) {
6340 RtAudioCallback callback = (RtAudioCallback) info->callback;
6341 double streamTime = getStreamTime();
6342 RtAudioStreamStatus status = 0;
// Report and clear any under/overflow flags set by previous iterations.
6343 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6344 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6345 handle->xrun[0] = false;
6347 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6348 status |= RTAUDIO_INPUT_OVERFLOW;
6349 handle->xrun[1] = false;
6351 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6352 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort now; 1 = drain remaining output then stop.
6353 if ( cbReturnValue == 2 ) {
6354 stream_.state = STREAM_STOPPING;
6355 handle->drainCounter = 2;
6359 else if ( cbReturnValue == 1 ) {
6360 handle->drainCounter = 1;
6361 handle->internalDrain = true;
6366 DWORD currentWritePointer, safeWritePointer;
6367 DWORD currentReadPointer, safeReadPointer;
6368 UINT nextWritePointer;
6370 LPVOID buffer1 = NULL;
6371 LPVOID buffer2 = NULL;
6372 DWORD bufferSize1 = 0;
6373 DWORD bufferSize2 = 0;
6378 MUTEX_LOCK( &stream_.mutex );
6379 if ( stream_.state == STREAM_STOPPED ) {
6380 MUTEX_UNLOCK( &stream_.mutex );
// First pass after start: wait for the hardware pointers to begin
// advancing before anchoring our read/write positions.
6384 if ( buffersRolling == false ) {
6385 if ( stream_.mode == DUPLEX ) {
6386 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6388 // It takes a while for the devices to get rolling. As a result,
6389 // there's no guarantee that the capture and write device pointers
6390 // will move in lockstep. Wait here for both devices to start
6391 // rolling, and then set our buffer pointers accordingly.
6392 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6393 // bytes later than the write buffer.
6395 // Stub: a serious risk of having a pre-emptive scheduling round
6396 // take place between the two GetCurrentPosition calls... but I'm
6397 // really not sure how to solve the problem. Temporarily boost to
6398 // Realtime priority, maybe; but I'm not sure what priority the
6399 // DirectSound service threads run at. We *should* be roughly
6400 // within a ms or so of correct.
6402 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6403 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6405 DWORD startSafeWritePointer, startSafeReadPointer;
6407 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6408 if ( FAILED( result ) ) {
6409 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6410 errorText_ = errorStream_.str();
6411 MUTEX_UNLOCK( &stream_.mutex );
6412 error( RtAudioError::SYSTEM_ERROR );
6415 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6416 if ( FAILED( result ) ) {
6417 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6418 errorText_ = errorStream_.str();
6419 MUTEX_UNLOCK( &stream_.mutex );
6420 error( RtAudioError::SYSTEM_ERROR );
6424 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6425 if ( FAILED( result ) ) {
6426 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6427 errorText_ = errorStream_.str();
6428 MUTEX_UNLOCK( &stream_.mutex );
6429 error( RtAudioError::SYSTEM_ERROR );
6432 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6433 if ( FAILED( result ) ) {
6434 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6435 errorText_ = errorStream_.str();
6436 MUTEX_UNLOCK( &stream_.mutex );
6437 error( RtAudioError::SYSTEM_ERROR );
// Both pointers have moved: the devices are rolling.
6440 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6444 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6446 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6447 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6448 handle->bufferPointer[1] = safeReadPointer;
6450 else if ( stream_.mode == OUTPUT ) {
6452 // Set the proper nextWritePosition after initial startup.
6453 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6454 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6455 if ( FAILED( result ) ) {
6456 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6457 errorText_ = errorStream_.str();
6458 MUTEX_UNLOCK( &stream_.mutex );
6459 error( RtAudioError::SYSTEM_ERROR );
6462 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6463 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6466 buffersRolling = true;
// ---- Playback: copy the next user buffer into the circular DS buffer ----
6469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6471 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6473 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6474 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6475 bufferBytes *= formatBytes( stream_.userFormat );
6476 memset( stream_.userBuffer[0], 0, bufferBytes );
6479 // Setup parameters and do buffer conversion if necessary.
6480 if ( stream_.doConvertBuffer[0] ) {
6481 buffer = stream_.deviceBuffer;
6482 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6483 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6484 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6487 buffer = stream_.userBuffer[0];
6488 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6489 bufferBytes *= formatBytes( stream_.userFormat );
6492 // No byte swapping necessary in DirectSound implementation.
6494 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6495 // unsigned. So, we need to convert our signed 8-bit data here to
6497 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6498 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6500 DWORD dsBufferSize = handle->dsBufferSize[0];
6501 nextWritePointer = handle->bufferPointer[0];
6503 DWORD endWrite, leadPointer;
6505 // Find out where the read and "safe write" pointers are.
6506 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6507 if ( FAILED( result ) ) {
6508 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6509 errorText_ = errorStream_.str();
6510 MUTEX_UNLOCK( &stream_.mutex );
6511 error( RtAudioError::SYSTEM_ERROR );
6515 // We will copy our output buffer into the region between
6516 // safeWritePointer and leadPointer. If leadPointer is not
6517 // beyond the next endWrite position, wait until it is.
6518 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6519 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6520 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6521 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6522 endWrite = nextWritePointer + bufferBytes;
6524 // Check whether the entire write region is behind the play pointer.
6525 if ( leadPointer >= endWrite ) break;
6527 // If we are here, then we must wait until the leadPointer advances
6528 // beyond the end of our next write region. We use the
6529 // Sleep() function to suspend operation until that happens.
6530 double millis = ( endWrite - leadPointer ) * 1000.0;
6531 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6532 if ( millis < 1.0 ) millis = 1.0;
6533 Sleep( (DWORD) millis );
6536 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6537 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6538 // We've strayed into the forbidden zone ... resync the read pointer.
6539 handle->xrun[0] = true;
6540 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6541 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6542 handle->bufferPointer[0] = nextWritePointer;
6543 endWrite = nextWritePointer + bufferBytes;
6546 // Lock free space in the buffer
6547 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6548 &bufferSize1, &buffer2, &bufferSize2, 0 );
6549 if ( FAILED( result ) ) {
6550 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6551 errorText_ = errorStream_.str();
6552 MUTEX_UNLOCK( &stream_.mutex );
6553 error( RtAudioError::SYSTEM_ERROR );
6557 // Copy our buffer into the DS buffer
6558 CopyMemory( buffer1, buffer, bufferSize1 );
6559 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6561 // Update our buffer offset and unlock sound buffer
// NOTE(review): the Unlock HRESULT is not assigned to 'result'; the
// FAILED(result) check below re-tests the earlier Lock result — confirm
// whether 'result =' was dropped by the extraction or upstream.
6562 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6563 if ( FAILED( result ) ) {
6564 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6565 errorText_ = errorStream_.str();
6566 MUTEX_UNLOCK( &stream_.mutex );
6567 error( RtAudioError::SYSTEM_ERROR );
6570 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6571 handle->bufferPointer[0] = nextWritePointer;
6574 // Don't bother draining input
6575 if ( handle->drainCounter ) {
6576 handle->drainCounter++;
// ---- Capture: pull the next block out of the circular DS capture buffer ----
6580 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6582 // Setup parameters.
6583 if ( stream_.doConvertBuffer[1] ) {
6584 buffer = stream_.deviceBuffer;
6585 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6586 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6589 buffer = stream_.userBuffer[1];
6590 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6591 bufferBytes *= formatBytes( stream_.userFormat );
6594 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6595 long nextReadPointer = handle->bufferPointer[1];
6596 DWORD dsBufferSize = handle->dsBufferSize[1];
6598 // Find out where the write and "safe read" pointers are.
6599 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6600 if ( FAILED( result ) ) {
6601 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6602 errorText_ = errorStream_.str();
6603 MUTEX_UNLOCK( &stream_.mutex );
6604 error( RtAudioError::SYSTEM_ERROR );
6608 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6609 DWORD endRead = nextReadPointer + bufferBytes;
6611 // Handling depends on whether we are INPUT or DUPLEX.
6612 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6613 // then a wait here will drag the write pointers into the forbidden zone.
6615 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6616 // it's in a safe position. This causes dropouts, but it seems to be the only
6617 // practical way to sync up the read and write pointers reliably, given the
6618 // the very complex relationship between phase and increment of the read and write
6621 // In order to minimize audible dropouts in DUPLEX mode, we will
6622 // provide a pre-roll period of 0.5 seconds in which we return
6623 // zeros from the read buffer while the pointers sync up.
6625 if ( stream_.mode == DUPLEX ) {
6626 if ( safeReadPointer < endRead ) {
6627 if ( duplexPrerollBytes <= 0 ) {
6628 // Pre-roll time over. Be more aggressive.
6629 int adjustment = endRead-safeReadPointer;
6631 handle->xrun[1] = true;
6633 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6634 // and perform fine adjustments later.
6635 // - small adjustments: back off by twice as much.
6636 if ( adjustment >= 2*bufferBytes )
6637 nextReadPointer = safeReadPointer-2*bufferBytes;
6639 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6641 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6645 // In pre-roll time. Just do it.
6646 nextReadPointer = safeReadPointer - bufferBytes;
6647 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6649 endRead = nextReadPointer + bufferBytes;
6652 else { // mode == INPUT
6653 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6654 // See comments for playback.
6655 double millis = (endRead - safeReadPointer) * 1000.0;
6656 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6657 if ( millis < 1.0 ) millis = 1.0;
6658 Sleep( (DWORD) millis );
6660 // Wake up and find out where we are now.
6661 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6662 if ( FAILED( result ) ) {
6663 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6664 errorText_ = errorStream_.str();
6665 MUTEX_UNLOCK( &stream_.mutex );
6666 error( RtAudioError::SYSTEM_ERROR );
6670 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6674 // Lock free space in the buffer
6675 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6676 &bufferSize1, &buffer2, &bufferSize2, 0 );
6677 if ( FAILED( result ) ) {
6678 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6679 errorText_ = errorStream_.str();
6680 MUTEX_UNLOCK( &stream_.mutex );
6681 error( RtAudioError::SYSTEM_ERROR );
6685 if ( duplexPrerollBytes <= 0 ) {
6686 // Copy the DS capture buffer into our buffer.
6687 CopyMemory( buffer, buffer1, bufferSize1 );
6688 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
// During pre-roll, hand the user zeros while the pointers sync up.
6691 memset( buffer, 0, bufferSize1 );
6692 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6693 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6696 // Update our buffer offset and unlock sound buffer
6697 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
// NOTE(review): as on the playback side, Unlock's HRESULT is not
// captured before the FAILED(result) test below — confirm intent.
6698 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6699 if ( FAILED( result ) ) {
6700 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6701 errorText_ = errorStream_.str();
6702 MUTEX_UNLOCK( &stream_.mutex );
6703 error( RtAudioError::SYSTEM_ERROR );
6706 handle->bufferPointer[1] = nextReadPointer;
6708 // No byte swapping necessary in DirectSound implementation.
6710 // If necessary, convert 8-bit data from unsigned to signed.
6711 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6712 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6714 // Do buffer conversion if necessary.
6715 if ( stream_.doConvertBuffer[1] )
6716 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6720 MUTEX_UNLOCK( &stream_.mutex );
6721 RtApi::tickStreamTime();
6724 // Definitions for utility functions and callbacks
6725 // specific to the DirectSound implementation.
// Entry point for the DirectSound callback thread (spawned via
// _beginthreadex in probeDeviceOpen). Spins calling callbackEvent()
// until isRunning is cleared by closeStream().
// NOTE(review): the thread-exit lines after the loop are not shown in
// this gappy listing.
6727 static unsigned __stdcall callbackHandler( void *ptr )
6729 CallbackInfo *info = (CallbackInfo *) ptr;
6730 RtApiDs *object = (RtApiDs *) info->object;
6731 bool* isRunning = &info->isRunning;
6733 while ( *isRunning == true ) {
6734 object->callbackEvent();
// DirectSound device-enumeration callback. Validates the device by
// opening it and querying its caps, then records its name/GUID in the
// shared dsDevices vector (merging input and output entries that share
// a name). Always returns TRUE so enumeration continues.
// NOTE(review): gappy listing — the 'else' keywords, HRESULT/caps
// declarations and some braces are among the lines not shown.
6741 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6742 LPCTSTR description,
6746 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6747 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6750 bool validDevice = false;
// Capture device: valid if it reports at least one channel and format.
6751 if ( probeInfo.isInput == true ) {
6753 LPDIRECTSOUNDCAPTURE object;
6755 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6756 if ( hr != DS_OK ) return TRUE;
6758 caps.dwSize = sizeof(caps);
6759 hr = object->GetCaps( &caps );
6760 if ( hr == DS_OK ) {
6761 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Output device: valid if it supports a primary mono or stereo buffer.
6768 LPDIRECTSOUND object;
6769 hr = DirectSoundCreate( lpguid, &object, NULL );
6770 if ( hr != DS_OK ) return TRUE;
6772 caps.dwSize = sizeof(caps);
6773 hr = object->GetCaps( &caps );
6774 if ( hr == DS_OK ) {
6775 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6781 // If good device, then save its name and guid.
6782 std::string name = convertCharPointerToStdString( description );
6783 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID is the system default device.
6784 if ( lpguid == NULL )
6785 name = "Default Device";
6786 if ( validDevice ) {
// Merge with an existing entry of the same name (id[1] = capture,
// id[0] = playback).
6787 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6788 if ( dsDevices[i].name == name ) {
6789 dsDevices[i].found = true;
6790 if ( probeInfo.isInput ) {
6791 dsDevices[i].id[1] = lpguid;
6792 dsDevices[i].validId[1] = true;
6795 dsDevices[i].id[0] = lpguid;
6796 dsDevices[i].validId[0] = true;
// No entry with this name yet: append a new DsDevice record.
6804 device.found = true;
6805 if ( probeInfo.isInput ) {
6806 device.id[1] = lpguid;
6807 device.validId[1] = true;
6810 device.id[0] = lpguid;
6811 device.validId[0] = true;
6813 dsDevices.push_back( device );
6819 static const char* getErrorString( int code )
6823 case DSERR_ALLOCATED:
6824 return "Already allocated";
6826 case DSERR_CONTROLUNAVAIL:
6827 return "Control unavailable";
6829 case DSERR_INVALIDPARAM:
6830 return "Invalid parameter";
6832 case DSERR_INVALIDCALL:
6833 return "Invalid call";
6836 return "Generic error";
6838 case DSERR_PRIOLEVELNEEDED:
6839 return "Priority level needed";
6841 case DSERR_OUTOFMEMORY:
6842 return "Out of memory";
6844 case DSERR_BADFORMAT:
6845 return "The sample rate or the channel format is not supported";
6847 case DSERR_UNSUPPORTED:
6848 return "Not supported";
6850 case DSERR_NODRIVER:
6853 case DSERR_ALREADYINITIALIZED:
6854 return "Already initialized";
6856 case DSERR_NOAGGREGATION:
6857 return "No aggregation";
6859 case DSERR_BUFFERLOST:
6860 return "Buffer lost";
6862 case DSERR_OTHERAPPHASPRIO:
6863 return "Another application already has priority";
6865 case DSERR_UNINITIALIZED:
6866 return "Uninitialized";
6869 return "DirectSound unknown error";
6872 //******************** End of __WINDOWS_DS__ *********************//
6876 #if defined(__LINUX_ALSA__)
6878 #include <alsa/asoundlib.h>
6881 // A structure to hold various information related to the ALSA API
6884 snd_pcm_t *handles[2];
6887 pthread_cond_t runnable_cv;
6891 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6894 static void *alsaCallbackHandler( void * ptr );
6896 RtApiAlsa :: RtApiAlsa()
6898 // Nothing to do here.
6901 RtApiAlsa :: ~RtApiAlsa()
6903 if ( stream_.state != STREAM_CLOSED ) closeStream();
6906 unsigned int RtApiAlsa :: getDeviceCount( void )
6908 unsigned nDevices = 0;
6909 int result, subdevice, card;
6913 // Count cards and devices
6915 snd_card_next( &card );
6916 while ( card >= 0 ) {
6917 sprintf( name, "hw:%d", card );
6918 result = snd_ctl_open( &handle, name, 0 );
6920 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6921 errorText_ = errorStream_.str();
6922 error( RtAudioError::WARNING );
6927 result = snd_ctl_pcm_next_device( handle, &subdevice );
6929 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6930 errorText_ = errorStream_.str();
6931 error( RtAudioError::WARNING );
6934 if ( subdevice < 0 )
6939 snd_ctl_close( handle );
6940 snd_card_next( &card );
6943 result = snd_ctl_open( &handle, "default", 0 );
6946 snd_ctl_close( handle );
6952 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6954 RtAudio::DeviceInfo info;
6955 info.probed = false;
6957 unsigned nDevices = 0;
6958 int result, subdevice, card;
6962 // Count cards and devices
6965 snd_card_next( &card );
6966 while ( card >= 0 ) {
6967 sprintf( name, "hw:%d", card );
6968 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6970 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6971 errorText_ = errorStream_.str();
6972 error( RtAudioError::WARNING );
6977 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6979 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6980 errorText_ = errorStream_.str();
6981 error( RtAudioError::WARNING );
6984 if ( subdevice < 0 ) break;
6985 if ( nDevices == device ) {
6986 sprintf( name, "hw:%d,%d", card, subdevice );
6992 snd_ctl_close( chandle );
6993 snd_card_next( &card );
6996 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6997 if ( result == 0 ) {
6998 if ( nDevices == device ) {
6999 strcpy( name, "default" );
7005 if ( nDevices == 0 ) {
7006 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7007 error( RtAudioError::INVALID_USE );
7011 if ( device >= nDevices ) {
7012 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7013 error( RtAudioError::INVALID_USE );
7019 // If a stream is already open, we cannot probe the stream devices.
7020 // Thus, use the saved results.
7021 if ( stream_.state != STREAM_CLOSED &&
7022 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7023 snd_ctl_close( chandle );
7024 if ( device >= devices_.size() ) {
7025 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7026 error( RtAudioError::WARNING );
7029 return devices_[ device ];
7032 int openMode = SND_PCM_ASYNC;
7033 snd_pcm_stream_t stream;
7034 snd_pcm_info_t *pcminfo;
7035 snd_pcm_info_alloca( &pcminfo );
7037 snd_pcm_hw_params_t *params;
7038 snd_pcm_hw_params_alloca( ¶ms );
7040 // First try for playback unless default device (which has subdev -1)
7041 stream = SND_PCM_STREAM_PLAYBACK;
7042 snd_pcm_info_set_stream( pcminfo, stream );
7043 if ( subdevice != -1 ) {
7044 snd_pcm_info_set_device( pcminfo, subdevice );
7045 snd_pcm_info_set_subdevice( pcminfo, 0 );
7047 result = snd_ctl_pcm_info( chandle, pcminfo );
7049 // Device probably doesn't support playback.
7054 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7056 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7057 errorText_ = errorStream_.str();
7058 error( RtAudioError::WARNING );
7062 // The device is open ... fill the parameter structure.
7063 result = snd_pcm_hw_params_any( phandle, params );
7065 snd_pcm_close( phandle );
7066 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7067 errorText_ = errorStream_.str();
7068 error( RtAudioError::WARNING );
7072 // Get output channel information.
7074 result = snd_pcm_hw_params_get_channels_max( params, &value );
7076 snd_pcm_close( phandle );
7077 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7078 errorText_ = errorStream_.str();
7079 error( RtAudioError::WARNING );
7082 info.outputChannels = value;
7083 snd_pcm_close( phandle );
7086 stream = SND_PCM_STREAM_CAPTURE;
7087 snd_pcm_info_set_stream( pcminfo, stream );
7089 // Now try for capture unless default device (with subdev = -1)
7090 if ( subdevice != -1 ) {
7091 result = snd_ctl_pcm_info( chandle, pcminfo );
7092 snd_ctl_close( chandle );
7094 // Device probably doesn't support capture.
7095 if ( info.outputChannels == 0 ) return info;
7096 goto probeParameters;
7100 snd_ctl_close( chandle );
7102 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7104 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7105 errorText_ = errorStream_.str();
7106 error( RtAudioError::WARNING );
7107 if ( info.outputChannels == 0 ) return info;
7108 goto probeParameters;
7111 // The device is open ... fill the parameter structure.
7112 result = snd_pcm_hw_params_any( phandle, params );
7114 snd_pcm_close( phandle );
7115 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7116 errorText_ = errorStream_.str();
7117 error( RtAudioError::WARNING );
7118 if ( info.outputChannels == 0 ) return info;
7119 goto probeParameters;
7122 result = snd_pcm_hw_params_get_channels_max( params, &value );
7124 snd_pcm_close( phandle );
7125 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7126 errorText_ = errorStream_.str();
7127 error( RtAudioError::WARNING );
7128 if ( info.outputChannels == 0 ) return info;
7129 goto probeParameters;
7131 info.inputChannels = value;
7132 snd_pcm_close( phandle );
7134 // If device opens for both playback and capture, we determine the channels.
7135 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7136 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7138 // ALSA doesn't provide default devices so we'll use the first available one.
7139 if ( device == 0 && info.outputChannels > 0 )
7140 info.isDefaultOutput = true;
7141 if ( device == 0 && info.inputChannels > 0 )
7142 info.isDefaultInput = true;
7145 // At this point, we just need to figure out the supported data
7146 // formats and sample rates. We'll proceed by opening the device in
7147 // the direction with the maximum number of channels, or playback if
7148 // they are equal. This might limit our sample rate options, but so
7151 if ( info.outputChannels >= info.inputChannels )
7152 stream = SND_PCM_STREAM_PLAYBACK;
7154 stream = SND_PCM_STREAM_CAPTURE;
7155 snd_pcm_info_set_stream( pcminfo, stream );
7157 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7159 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7160 errorText_ = errorStream_.str();
7161 error( RtAudioError::WARNING );
7165 // The device is open ... fill the parameter structure.
7166 result = snd_pcm_hw_params_any( phandle, params );
7168 snd_pcm_close( phandle );
7169 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7170 errorText_ = errorStream_.str();
7171 error( RtAudioError::WARNING );
7175 // Test our discrete set of sample rate values.
7176 info.sampleRates.clear();
7177 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7178 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7179 info.sampleRates.push_back( SAMPLE_RATES[i] );
7181 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7182 info.preferredSampleRate = SAMPLE_RATES[i];
7185 if ( info.sampleRates.size() == 0 ) {
7186 snd_pcm_close( phandle );
7187 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7188 errorText_ = errorStream_.str();
7189 error( RtAudioError::WARNING );
7193 // Probe the supported data formats ... we don't care about endian-ness just yet
7194 snd_pcm_format_t format;
7195 info.nativeFormats = 0;
7196 format = SND_PCM_FORMAT_S8;
7197 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7198 info.nativeFormats |= RTAUDIO_SINT8;
7199 format = SND_PCM_FORMAT_S16;
7200 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7201 info.nativeFormats |= RTAUDIO_SINT16;
7202 format = SND_PCM_FORMAT_S24;
7203 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7204 info.nativeFormats |= RTAUDIO_SINT24;
7205 format = SND_PCM_FORMAT_S32;
7206 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7207 info.nativeFormats |= RTAUDIO_SINT32;
7208 format = SND_PCM_FORMAT_FLOAT;
7209 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7210 info.nativeFormats |= RTAUDIO_FLOAT32;
7211 format = SND_PCM_FORMAT_FLOAT64;
7212 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7213 info.nativeFormats |= RTAUDIO_FLOAT64;
7215 // Check that we have at least one supported format
7216 if ( info.nativeFormats == 0 ) {
7217 snd_pcm_close( phandle );
7218 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7219 errorText_ = errorStream_.str();
7220 error( RtAudioError::WARNING );
7224 // Get the device name
7226 result = snd_card_get_name( card, &cardname );
7227 if ( result >= 0 ) {
7228 sprintf( name, "hw:%s,%d", cardname, subdevice );
7233 // That's all ... close the device and return
7234 snd_pcm_close( phandle );
7239 void RtApiAlsa :: saveDeviceInfo( void )
7243 unsigned int nDevices = getDeviceCount();
7244 devices_.resize( nDevices );
7245 for ( unsigned int i=0; i<nDevices; i++ )
7246 devices_[i] = getDeviceInfo( i );
7249 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7250 unsigned int firstChannel, unsigned int sampleRate,
7251 RtAudioFormat format, unsigned int *bufferSize,
7252 RtAudio::StreamOptions *options )
7255 #if defined(__RTAUDIO_DEBUG__)
7257 snd_output_stdio_attach(&out, stderr, 0);
7260 // I'm not using the "plug" interface ... too much inconsistent behavior.
7262 unsigned nDevices = 0;
7263 int result, subdevice, card;
7267 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7268 snprintf(name, sizeof(name), "%s", "default");
7270 // Count cards and devices
7272 snd_card_next( &card );
7273 while ( card >= 0 ) {
7274 sprintf( name, "hw:%d", card );
7275 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7277 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7278 errorText_ = errorStream_.str();
7283 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7284 if ( result < 0 ) break;
7285 if ( subdevice < 0 ) break;
7286 if ( nDevices == device ) {
7287 sprintf( name, "hw:%d,%d", card, subdevice );
7288 snd_ctl_close( chandle );
7293 snd_ctl_close( chandle );
7294 snd_card_next( &card );
7297 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7298 if ( result == 0 ) {
7299 if ( nDevices == device ) {
7300 strcpy( name, "default" );
7306 if ( nDevices == 0 ) {
7307 // This should not happen because a check is made before this function is called.
7308 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7312 if ( device >= nDevices ) {
7313 // This should not happen because a check is made before this function is called.
7314 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7321 // The getDeviceInfo() function will not work for a device that is
7322 // already open. Thus, we'll probe the system before opening a
7323 // stream and save the results for use by getDeviceInfo().
7324 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7325 this->saveDeviceInfo();
7327 snd_pcm_stream_t stream;
7328 if ( mode == OUTPUT )
7329 stream = SND_PCM_STREAM_PLAYBACK;
7331 stream = SND_PCM_STREAM_CAPTURE;
7334 int openMode = SND_PCM_ASYNC;
7335 result = snd_pcm_open( &phandle, name, stream, openMode );
7337 if ( mode == OUTPUT )
7338 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7340 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7341 errorText_ = errorStream_.str();
7345 // Fill the parameter structure.
7346 snd_pcm_hw_params_t *hw_params;
7347 snd_pcm_hw_params_alloca( &hw_params );
7348 result = snd_pcm_hw_params_any( phandle, hw_params );
7350 snd_pcm_close( phandle );
7351 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7352 errorText_ = errorStream_.str();
7356 #if defined(__RTAUDIO_DEBUG__)
7357 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7358 snd_pcm_hw_params_dump( hw_params, out );
7361 // Set access ... check user preference.
7362 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7363 stream_.userInterleaved = false;
7364 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7366 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7367 stream_.deviceInterleaved[mode] = true;
7370 stream_.deviceInterleaved[mode] = false;
7373 stream_.userInterleaved = true;
7374 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7376 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7377 stream_.deviceInterleaved[mode] = false;
7380 stream_.deviceInterleaved[mode] = true;
7384 snd_pcm_close( phandle );
7385 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7386 errorText_ = errorStream_.str();
7390 // Determine how to set the device format.
7391 stream_.userFormat = format;
7392 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7394 if ( format == RTAUDIO_SINT8 )
7395 deviceFormat = SND_PCM_FORMAT_S8;
7396 else if ( format == RTAUDIO_SINT16 )
7397 deviceFormat = SND_PCM_FORMAT_S16;
7398 else if ( format == RTAUDIO_SINT24 )
7399 deviceFormat = SND_PCM_FORMAT_S24;
7400 else if ( format == RTAUDIO_SINT32 )
7401 deviceFormat = SND_PCM_FORMAT_S32;
7402 else if ( format == RTAUDIO_FLOAT32 )
7403 deviceFormat = SND_PCM_FORMAT_FLOAT;
7404 else if ( format == RTAUDIO_FLOAT64 )
7405 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7407 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7408 stream_.deviceFormat[mode] = format;
7412 // The user requested format is not natively supported by the device.
7413 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7414 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7415 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7419 deviceFormat = SND_PCM_FORMAT_FLOAT;
7420 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7421 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7425 deviceFormat = SND_PCM_FORMAT_S32;
7426 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7427 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7431 deviceFormat = SND_PCM_FORMAT_S24;
7432 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7433 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7437 deviceFormat = SND_PCM_FORMAT_S16;
7438 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7439 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7443 deviceFormat = SND_PCM_FORMAT_S8;
7444 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7445 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7449 // If we get here, no supported format was found.
7450 snd_pcm_close( phandle );
7451 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7452 errorText_ = errorStream_.str();
7456 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7458 snd_pcm_close( phandle );
7459 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7460 errorText_ = errorStream_.str();
7464 // Determine whether byte-swaping is necessary.
7465 stream_.doByteSwap[mode] = false;
7466 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7467 result = snd_pcm_format_cpu_endian( deviceFormat );
7469 stream_.doByteSwap[mode] = true;
7470 else if (result < 0) {
7471 snd_pcm_close( phandle );
7472 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7473 errorText_ = errorStream_.str();
7478 // Set the sample rate.
7479 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7481 snd_pcm_close( phandle );
7482 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7483 errorText_ = errorStream_.str();
7487 // Determine the number of channels for this device. We support a possible
7488 // minimum device channel number > than the value requested by the user.
7489 stream_.nUserChannels[mode] = channels;
7491 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7492 unsigned int deviceChannels = value;
7493 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7494 snd_pcm_close( phandle );
7495 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7496 errorText_ = errorStream_.str();
7500 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7502 snd_pcm_close( phandle );
7503 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7504 errorText_ = errorStream_.str();
7507 deviceChannels = value;
7508 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7509 stream_.nDeviceChannels[mode] = deviceChannels;
7511 // Set the device channels.
7512 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7514 snd_pcm_close( phandle );
7515 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7516 errorText_ = errorStream_.str();
7520 // Set the buffer (or period) size.
7522 snd_pcm_uframes_t periodSize = *bufferSize;
7523 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7525 snd_pcm_close( phandle );
7526 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7527 errorText_ = errorStream_.str();
7530 *bufferSize = periodSize;
7532 // Set the buffer number, which in ALSA is referred to as the "period".
7533 unsigned int periods = 0;
7534 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7535 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7536 if ( periods < 2 ) periods = 4; // a fairly safe default value
7537 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7539 snd_pcm_close( phandle );
7540 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7541 errorText_ = errorStream_.str();
7545 // If attempting to setup a duplex stream, the bufferSize parameter
7546 // MUST be the same in both directions!
7547 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7548 snd_pcm_close( phandle );
7549 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7550 errorText_ = errorStream_.str();
7554 stream_.bufferSize = *bufferSize;
7556 // Install the hardware configuration
7557 result = snd_pcm_hw_params( phandle, hw_params );
7559 snd_pcm_close( phandle );
7560 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7561 errorText_ = errorStream_.str();
7565 #if defined(__RTAUDIO_DEBUG__)
7566 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7567 snd_pcm_hw_params_dump( hw_params, out );
7570 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7571 snd_pcm_sw_params_t *sw_params = NULL;
7572 snd_pcm_sw_params_alloca( &sw_params );
7573 snd_pcm_sw_params_current( phandle, sw_params );
7574 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7575 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7576 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7578 // The following two settings were suggested by Theo Veenker
7579 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7580 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7582 // here are two options for a fix
7583 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7584 snd_pcm_uframes_t val;
7585 snd_pcm_sw_params_get_boundary( sw_params, &val );
7586 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7588 result = snd_pcm_sw_params( phandle, sw_params );
7590 snd_pcm_close( phandle );
7591 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7592 errorText_ = errorStream_.str();
7596 #if defined(__RTAUDIO_DEBUG__)
7597 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7598 snd_pcm_sw_params_dump( sw_params, out );
7601 // Set flags for buffer conversion
7602 stream_.doConvertBuffer[mode] = false;
7603 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7604 stream_.doConvertBuffer[mode] = true;
7605 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7606 stream_.doConvertBuffer[mode] = true;
7607 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7608 stream_.nUserChannels[mode] > 1 )
7609 stream_.doConvertBuffer[mode] = true;
7611 // Allocate the ApiHandle if necessary and then save.
7612 AlsaHandle *apiInfo = 0;
7613 if ( stream_.apiHandle == 0 ) {
7615 apiInfo = (AlsaHandle *) new AlsaHandle;
7617 catch ( std::bad_alloc& ) {
7618 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7622 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7623 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7627 stream_.apiHandle = (void *) apiInfo;
7628 apiInfo->handles[0] = 0;
7629 apiInfo->handles[1] = 0;
7632 apiInfo = (AlsaHandle *) stream_.apiHandle;
7634 apiInfo->handles[mode] = phandle;
7637 // Allocate necessary internal buffers.
7638 unsigned long bufferBytes;
7639 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7640 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7641 if ( stream_.userBuffer[mode] == NULL ) {
7642 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7646 if ( stream_.doConvertBuffer[mode] ) {
7648 bool makeBuffer = true;
7649 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7650 if ( mode == INPUT ) {
7651 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7652 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7653 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7658 bufferBytes *= *bufferSize;
7659 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7660 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7661 if ( stream_.deviceBuffer == NULL ) {
7662 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7668 stream_.sampleRate = sampleRate;
7669 stream_.nBuffers = periods;
7670 stream_.device[mode] = device;
7671 stream_.state = STREAM_STOPPED;
7673 // Setup the buffer conversion information structure.
7674 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7676 // Setup thread if necessary.
7677 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7678 // We had already set up an output stream.
7679 stream_.mode = DUPLEX;
7680 // Link the streams if possible.
7681 apiInfo->synchronized = false;
7682 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7683 apiInfo->synchronized = true;
7685 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7686 error( RtAudioError::WARNING );
7690 stream_.mode = mode;
7692 // Setup callback thread.
7693 stream_.callbackInfo.object = (void *) this;
7695 // Set the thread attributes for joinable and realtime scheduling
7696 // priority (optional). The higher priority will only take affect
7697 // if the program is run as root or suid. Note, under Linux
7698 // processes with CAP_SYS_NICE privilege, a user can change
7699 // scheduling policy and priority (thus need not be root). See
7700 // POSIX "capabilities".
7701 pthread_attr_t attr;
7702 pthread_attr_init( &attr );
7703 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7704 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7705 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7706 stream_.callbackInfo.doRealtime = true;
7707 struct sched_param param;
7708 int priority = options->priority;
7709 int min = sched_get_priority_min( SCHED_RR );
7710 int max = sched_get_priority_max( SCHED_RR );
7711 if ( priority < min ) priority = min;
7712 else if ( priority > max ) priority = max;
7713 param.sched_priority = priority;
7715 // Set the policy BEFORE the priority. Otherwise it fails.
7716 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7717 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7718 // This is definitely required. Otherwise it fails.
7719 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7720 pthread_attr_setschedparam(&attr, ¶m);
7723 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7725 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7728 stream_.callbackInfo.isRunning = true;
7729 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7730 pthread_attr_destroy( &attr );
7732 // Failed. Try instead with default attributes.
7733 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7735 stream_.callbackInfo.isRunning = false;
7736 errorText_ = "RtApiAlsa::error creating callback thread!";
7746 pthread_cond_destroy( &apiInfo->runnable_cv );
7747 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7748 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7750 stream_.apiHandle = 0;
7753 if ( phandle) snd_pcm_close( phandle );
7755 for ( int i=0; i<2; i++ ) {
7756 if ( stream_.userBuffer[i] ) {
7757 free( stream_.userBuffer[i] );
7758 stream_.userBuffer[i] = 0;
7762 if ( stream_.deviceBuffer ) {
7763 free( stream_.deviceBuffer );
7764 stream_.deviceBuffer = 0;
7767 stream_.state = STREAM_CLOSED;
7771 void RtApiAlsa :: closeStream()
7773 if ( stream_.state == STREAM_CLOSED ) {
7774 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7775 error( RtAudioError::WARNING );
7779 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7780 stream_.callbackInfo.isRunning = false;
7781 MUTEX_LOCK( &stream_.mutex );
7782 if ( stream_.state == STREAM_STOPPED ) {
7783 apiInfo->runnable = true;
7784 pthread_cond_signal( &apiInfo->runnable_cv );
7786 MUTEX_UNLOCK( &stream_.mutex );
7787 pthread_join( stream_.callbackInfo.thread, NULL );
7789 if ( stream_.state == STREAM_RUNNING ) {
7790 stream_.state = STREAM_STOPPED;
7791 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7792 snd_pcm_drop( apiInfo->handles[0] );
7793 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7794 snd_pcm_drop( apiInfo->handles[1] );
7798 pthread_cond_destroy( &apiInfo->runnable_cv );
7799 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7800 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7802 stream_.apiHandle = 0;
7805 for ( int i=0; i<2; i++ ) {
7806 if ( stream_.userBuffer[i] ) {
7807 free( stream_.userBuffer[i] );
7808 stream_.userBuffer[i] = 0;
7812 if ( stream_.deviceBuffer ) {
7813 free( stream_.deviceBuffer );
7814 stream_.deviceBuffer = 0;
7817 stream_.mode = UNINITIALIZED;
7818 stream_.state = STREAM_CLOSED;
// Start a previously opened ALSA stream: prepare the output and/or input
// pcm devices if they are not already in SND_PCM_STATE_PREPARED, mark the
// stream RUNNING, and wake the blocked callback thread via runnable_cv.
// NOTE(review): the embedded source numbering is non-contiguous here —
// braces, the `int result` declaration, and the `if ( result < 0 )` error
// branches are elided from this view of the listing.
7821 void RtApiAlsa :: startStream()
7823 // This method calls snd_pcm_prepare if the device isn't already in that state.
// Warn (not error) if the caller starts a stream that is already running.
7826 if ( stream_.state == STREAM_RUNNING ) {
7827 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7828 error( RtAudioError::WARNING );
7832 MUTEX_LOCK( &stream_.mutex );
7835 snd_pcm_state_t state;
7836 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7837 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// handle[0] is the playback pcm; prepare it only when necessary.
7838 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7839 state = snd_pcm_state( handle[0] );
7840 if ( state != SND_PCM_STATE_PREPARED ) {
7841 result = snd_pcm_prepare( handle[0] );
7843 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7844 errorText_ = errorStream_.str();
// handle[1] is the capture pcm; skipped when the two pcms are link-synchronized.
7850 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7851 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7852 state = snd_pcm_state( handle[1] );
7853 if ( state != SND_PCM_STATE_PREPARED ) {
7854 result = snd_pcm_prepare( handle[1] );
7856 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7857 errorText_ = errorStream_.str();
7863 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
7866 apiInfo->runnable = true;
7867 pthread_cond_signal( &apiInfo->runnable_cv );
7868 MUTEX_UNLOCK( &stream_.mutex );
7870 if ( result >= 0 ) return;
7871 error( RtAudioError::SYSTEM_ERROR );
// Stop the ALSA stream gracefully: drain (or drop, when synchronized) the
// playback pcm, drop the capture pcm, and clear `runnable` so the callback
// thread parks on its condition variable instead of spinning.
// NOTE(review): non-contiguous numbering — braces, `int result = 0;` and the
// `if ( result < 0 )` error branches are elided from this view.
7874 void RtApiAlsa :: stopStream()
7877 if ( stream_.state == STREAM_STOPPED ) {
7878 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7879 error( RtAudioError::WARNING );
// State is flipped before taking the mutex so the callback thread sees it.
7883 stream_.state = STREAM_STOPPED;
7884 MUTEX_LOCK( &stream_.mutex );
7887 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7888 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7889 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// When the pcms are linked, drop immediately; otherwise drain pending frames.
7890 if ( apiInfo->synchronized )
7891 result = snd_pcm_drop( handle[0] );
7893 result = snd_pcm_drain( handle[0] );
7895 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7896 errorText_ = errorStream_.str();
7901 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7902 result = snd_pcm_drop( handle[1] );
7904 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7905 errorText_ = errorStream_.str();
7911 apiInfo->runnable = false; // fixes high CPU usage when stopped
7912 MUTEX_UNLOCK( &stream_.mutex );
7914 if ( result >= 0 ) return;
7915 error( RtAudioError::SYSTEM_ERROR );
// Abort the ALSA stream immediately: unlike stopStream(), both pcms are
// dropped (pending frames discarded) rather than drained.
// NOTE(review): non-contiguous numbering — braces, `int result = 0;` and the
// `if ( result < 0 )` error branches are elided from this view.
7918 void RtApiAlsa :: abortStream()
7921 if ( stream_.state == STREAM_STOPPED ) {
7922 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7923 error( RtAudioError::WARNING );
7927 stream_.state = STREAM_STOPPED;
7928 MUTEX_LOCK( &stream_.mutex );
7931 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7932 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7933 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7934 result = snd_pcm_drop( handle[0] );
7936 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7937 errorText_ = errorStream_.str();
7942 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7943 result = snd_pcm_drop( handle[1] );
7945 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7946 errorText_ = errorStream_.str();
// Park the callback thread on its condition variable while stopped.
7952 apiInfo->runnable = false; // fixes high CPU usage when stopped
7953 MUTEX_UNLOCK( &stream_.mutex );
7955 if ( result >= 0 ) return;
7956 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback thread: wait while stopped, invoke the
// user callback, then read captured frames from the capture pcm and/or write
// rendered frames to the playback pcm, handling xruns (-EPIPE) by re-preparing
// the device and flagging the condition to the user on the next callback.
// NOTE(review): non-contiguous numbering — braces, early `return`s, the
// declarations of `result`, `buffer`, `channels`, `handle`, and the `unlock:`
// label targeted by the gotos are elided from this view of the listing.
7959 void RtApiAlsa :: callbackEvent()
7961 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on runnable_cv until startStream() signals us.
7962 if ( stream_.state == STREAM_STOPPED ) {
7963 MUTEX_LOCK( &stream_.mutex );
7964 while ( !apiInfo->runnable )
7965 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7967 if ( stream_.state != STREAM_RUNNING ) {
7968 MUTEX_UNLOCK( &stream_.mutex );
7971 MUTEX_UNLOCK( &stream_.mutex );
7974 if ( stream_.state == STREAM_CLOSED ) {
7975 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7976 error( RtAudioError::WARNING );
// Report any xrun recorded since the last callback, then clear the flag.
7980 int doStopStream = 0;
7981 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7982 double streamTime = getStreamTime();
7983 RtAudioStreamStatus status = 0;
7984 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7985 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7986 apiInfo->xrun[0] = false;
7988 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7989 status |= RTAUDIO_INPUT_OVERFLOW;
7990 apiInfo->xrun[1] = false;
// doStopStream: 0 = continue, 1 = stopStream after I/O, 2 = abort now.
7992 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7993 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
7995 if ( doStopStream == 2 ) {
8000 MUTEX_LOCK( &stream_.mutex );
8002 // The state might change while waiting on a mutex.
8003 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8009 snd_pcm_sframes_t frames;
8010 RtAudioFormat format;
8011 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side: read one buffer from handle[1]. ----
8013 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8015 // Setup parameters.
8016 if ( stream_.doConvertBuffer[1] ) {
8017 buffer = stream_.deviceBuffer;
8018 channels = stream_.nDeviceChannels[1];
8019 format = stream_.deviceFormat[1];
8022 buffer = stream_.userBuffer[1];
8023 channels = stream_.nUserChannels[1];
8024 format = stream_.userFormat;
8027 // Read samples from device in interleaved/non-interleaved format.
8028 if ( stream_.deviceInterleaved[1] )
8029 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one per-channel pointer into the planar buffer.
8031 void *bufs[channels];
8032 size_t offset = stream_.bufferSize * formatBytes( format );
8033 for ( int i=0; i<channels; i++ )
8034 bufs[i] = (void *) (buffer + (i * offset));
8035 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8038 if ( result < (int) stream_.bufferSize ) {
8039 // Either an error or overrun occured.
8040 if ( result == -EPIPE ) {
8041 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8042 if ( state == SND_PCM_STATE_XRUN ) {
// Recover from the overrun and remember it for the next status report.
8043 apiInfo->xrun[1] = true;
8044 result = snd_pcm_prepare( handle[1] );
8046 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8047 errorText_ = errorStream_.str();
8051 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8052 errorText_ = errorStream_.str();
8056 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8057 errorText_ = errorStream_.str();
8059 error( RtAudioError::WARNING );
8063 // Do byte swapping if necessary.
8064 if ( stream_.doByteSwap[1] )
8065 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8067 // Do buffer conversion if necessary.
8068 if ( stream_.doConvertBuffer[1] )
8069 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8071 // Check stream latency
8072 result = snd_pcm_delay( handle[1], &frames );
8073 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side: write one buffer to handle[0]. ----
8078 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8080 // Setup parameters and do buffer conversion if necessary.
8081 if ( stream_.doConvertBuffer[0] ) {
8082 buffer = stream_.deviceBuffer;
8083 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8084 channels = stream_.nDeviceChannels[0];
8085 format = stream_.deviceFormat[0];
8088 buffer = stream_.userBuffer[0];
8089 channels = stream_.nUserChannels[0];
8090 format = stream_.userFormat;
8093 // Do byte swapping if necessary.
8094 if ( stream_.doByteSwap[0] )
8095 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8097 // Write samples to device in interleaved/non-interleaved format.
8098 if ( stream_.deviceInterleaved[0] )
8099 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8101 void *bufs[channels];
8102 size_t offset = stream_.bufferSize * formatBytes( format );
8103 for ( int i=0; i<channels; i++ )
8104 bufs[i] = (void *) (buffer + (i * offset));
8105 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8108 if ( result < (int) stream_.bufferSize ) {
8109 // Either an error or underrun occured.
8110 if ( result == -EPIPE ) {
8111 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8112 if ( state == SND_PCM_STATE_XRUN ) {
8113 apiInfo->xrun[0] = true;
8114 result = snd_pcm_prepare( handle[0] );
8116 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8117 errorText_ = errorStream_.str();
8120 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8123 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8124 errorText_ = errorStream_.str();
8128 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8129 errorText_ = errorStream_.str();
8131 error( RtAudioError::WARNING );
8135 // Check stream latency
8136 result = snd_pcm_delay( handle[0], &frames );
8137 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8141 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock, then honor a deferred stop request from the callback.
8143 RtApi::tickStreamTime();
8144 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops calling
// RtApiAlsa::callbackEvent() until CallbackInfo::isRunning is cleared,
// honoring pthread cancellation points between iterations.
// NOTE(review): closing braces are elided from this view of the listing.
8147 static void *alsaCallbackHandler( void *ptr )
8149 CallbackInfo *info = (CallbackInfo *) ptr;
8150 RtApiAlsa *object = (RtApiAlsa *) info->object;
8151 bool *isRunning = &info->isRunning;
8153 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime (SCHED_RR) scheduling actually took effect.
8154 if ( info->doRealtime ) {
8155 std::cerr << "RtAudio alsa: " <<
8156 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8157 "running realtime scheduling" << std::endl;
8161 while ( *isRunning == true ) {
8162 pthread_testcancel();
8163 object->callbackEvent();
8166 pthread_exit( NULL );
8169 //******************** End of __LINUX_ALSA__ *********************//
8172 #if defined(__LINUX_PULSE__)
8174 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8175 // and Tristan Matthews.
8177 #include <pulse/error.h>
8178 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; the list is zero-terminated
// so callers iterate with `for ( const unsigned int *sr = ...; *sr; ++sr )`.
8181 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8182 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to its PulseAudio equivalent; the table below
// is terminated by the {0, PA_SAMPLE_INVALID} sentinel entry.
8184 struct rtaudio_pa_format_mapping_t {
8185 RtAudioFormat rtaudio_format;
8186 pa_sample_format_t pa_format;
8189 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8190 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8191 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8192 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8193 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the simple-API playback and
// record connections plus the runnable flag/condvar used to park the
// callback thread while the stream is stopped.
// NOTE(review): some members (s_play, s_rec, thread, runnable) are elided
// from this view of the listing — non-contiguous numbering.
8195 struct PulseAudioHandle {
8199 pthread_cond_t runnable_cv;
8201 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure the stream is torn down if the user never closed it.
// NOTE(review): the closeStream() call inside the if is elided from this view.
8204 RtApiPulse::~RtApiPulse()
8206 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend; the body is elided from this
// view of the listing (non-contiguous numbering) — presumably it reports a
// single virtual "PulseAudio" device, matching getDeviceInfo() below. TODO confirm.
8210 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single virtual "PulseAudio" device: stereo in/out/duplex,
// default for both directions, with the fixed SUPPORTED_SAMPLERATES list and
// the three natively supported formats. The device index is ignored.
// NOTE(review): the `return info;` line is elided from this view.
8215 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8217 RtAudio::DeviceInfo info;
8219 info.name = "PulseAudio";
8220 info.outputChannels = 2;
8221 info.inputChannels = 2;
8222 info.duplexChannels = 2;
8223 info.isDefaultOutput = true;
8224 info.isDefaultInput = true;
8226 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8227 info.sampleRates.push_back( *sr );
8229 info.preferredSampleRate = 48000;
8230 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loops calling
// RtApiPulse::callbackEvent() until CallbackInfo::isRunning is cleared.
// NOTE(review): closing braces are elided from this view of the listing.
8237 static void *pulseaudio_callback( void * user )
8238 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8239 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8241 volatile bool *isRunning = &cbi->isRunning;
8241 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether realtime scheduling actually took effect.
8242 if (cbi->doRealtime) {
8243 std::cerr << "RtAudio pulse: " <<
8244 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8245 "running realtime scheduling" << std::endl;
8249 while ( *isRunning ) {
8250 pthread_testcancel();
8251 context->callbackEvent();
8254 pthread_exit( NULL );
// Close the PulseAudio stream: stop the callback loop, wake it if it is
// parked on runnable_cv, join the thread, release both simple-API
// connections and the handle, free user buffers, and reset stream state.
// NOTE(review): non-contiguous numbering — several braces and the
// `delete pah;` / null checks are elided from this view of the listing.
8257 void RtApiPulse::closeStream( void )
8259 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Make the callback loop's while-condition false before waking/joining it.
8261 stream_.callbackInfo.isRunning = false;
8263 MUTEX_LOCK( &stream_.mutex );
8264 if ( stream_.state == STREAM_STOPPED ) {
8265 pah->runnable = true;
8266 pthread_cond_signal( &pah->runnable_cv );
8268 MUTEX_UNLOCK( &stream_.mutex );
8270 pthread_join( pah->thread, 0 );
// Flush pending playback data before freeing the connection.
8271 if ( pah->s_play ) {
8272 pa_simple_flush( pah->s_play, NULL );
8273 pa_simple_free( pah->s_play );
8276 pa_simple_free( pah->s_rec );
8278 pthread_cond_destroy( &pah->runnable_cv );
8280 stream_.apiHandle = 0;
8283 if ( stream_.userBuffer[0] ) {
8284 free( stream_.userBuffer[0] );
8285 stream_.userBuffer[0] = 0;
8287 if ( stream_.userBuffer[1] ) {
8288 free( stream_.userBuffer[1] );
8289 stream_.userBuffer[1] = 0;
8292 stream_.state = STREAM_CLOSED;
8293 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback thread: wait while stopped,
// invoke the user callback, then blocking-write the output buffer with
// pa_simple_write and/or blocking-read into the input buffer with
// pa_simple_read, converting between user and device formats as configured.
// NOTE(review): non-contiguous numbering — braces, early `return`s, the
// declarations of `bytes`/`pa_error`, and the unlock label are elided from
// this view of the listing.
8296 void RtApiPulse::callbackEvent( void )
8298 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on runnable_cv until startStream() signals us.
8300 if ( stream_.state == STREAM_STOPPED ) {
8301 MUTEX_LOCK( &stream_.mutex );
8302 while ( !pah->runnable )
8303 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8305 if ( stream_.state != STREAM_RUNNING ) {
8306 MUTEX_UNLOCK( &stream_.mutex );
8309 MUTEX_UNLOCK( &stream_.mutex );
8312 if ( stream_.state == STREAM_CLOSED ) {
8313 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8314 "this shouldn't happen!";
8315 error( RtAudioError::WARNING );
// doStopStream: 0 = continue, 1 = stopStream afterwards, 2 = abort now.
8319 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8320 double streamTime = getStreamTime();
8321 RtAudioStreamStatus status = 0;
8322 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8323 stream_.bufferSize, streamTime, status,
8324 stream_.callbackInfo.userData );
8326 if ( doStopStream == 2 ) {
8331 MUTEX_LOCK( &stream_.mutex );
// Choose the staging buffers: deviceBuffer when format/channel conversion
// is needed, the user buffers otherwise.
8332 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8333 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8335 if ( stream_.state != STREAM_RUNNING )
8340 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8341 if ( stream_.doConvertBuffer[OUTPUT] ) {
8342 convertBuffer( stream_.deviceBuffer,
8343 stream_.userBuffer[OUTPUT],
8344 stream_.convertInfo[OUTPUT] );
8345 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8346 formatBytes( stream_.deviceFormat[OUTPUT] );
8348 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8349 formatBytes( stream_.userFormat );
// pa_simple_write blocks until the server accepts the whole buffer.
8351 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8352 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8353 pa_strerror( pa_error ) << ".";
8354 errorText_ = errorStream_.str();
8355 error( RtAudioError::WARNING );
8359 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8360 if ( stream_.doConvertBuffer[INPUT] )
8361 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8362 formatBytes( stream_.deviceFormat[INPUT] );
8364 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8365 formatBytes( stream_.userFormat );
8367 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8368 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8369 pa_strerror( pa_error ) << ".";
8370 errorText_ = errorStream_.str();
8371 error( RtAudioError::WARNING );
// Convert captured device-format data into the user buffer after the read.
8373 if ( stream_.doConvertBuffer[INPUT] ) {
8374 convertBuffer( stream_.userBuffer[INPUT],
8375 stream_.deviceBuffer,
8376 stream_.convertInfo[INPUT] );
8381 MUTEX_UNLOCK( &stream_.mutex );
8382 RtApi::tickStreamTime();
8384 if ( doStopStream == 1 )
// Start the PulseAudio stream: validate state, mark it RUNNING, and wake
// the callback thread parked on runnable_cv.
// NOTE(review): `return`s after the error() calls and closing braces are
// elided from this view of the listing.
8388 void RtApiPulse::startStream( void )
8390 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8392 if ( stream_.state == STREAM_CLOSED ) {
8393 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8394 error( RtAudioError::INVALID_USE );
8397 if ( stream_.state == STREAM_RUNNING ) {
8398 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8399 error( RtAudioError::WARNING );
8403 MUTEX_LOCK( &stream_.mutex );
8405 stream_.state = STREAM_RUNNING;
8407 pah->runnable = true;
8408 pthread_cond_signal( &pah->runnable_cv );
8409 MUTEX_UNLOCK( &stream_.mutex );
// Stop the PulseAudio stream gracefully: mark it STOPPED and drain pending
// playback data with pa_simple_drain so queued audio is heard before return.
// NOTE(review): `return`s, the `int pa_error;` declaration, and closing
// braces are elided from this view of the listing.
8412 void RtApiPulse::stopStream( void )
8414 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8416 if ( stream_.state == STREAM_CLOSED ) {
8417 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8418 error( RtAudioError::INVALID_USE );
8421 if ( stream_.state == STREAM_STOPPED ) {
8422 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8423 error( RtAudioError::WARNING );
8427 stream_.state = STREAM_STOPPED;
8428 MUTEX_LOCK( &stream_.mutex );
8430 if ( pah && pah->s_play ) {
// Drain blocks until the server has played all queued data.
8432 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8433 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8434 pa_strerror( pa_error ) << ".";
8435 errorText_ = errorStream_.str();
// Unlock before error() since SYSTEM_ERROR may throw out of this function.
8436 MUTEX_UNLOCK( &stream_.mutex );
8437 error( RtAudioError::SYSTEM_ERROR );
8442 stream_.state = STREAM_STOPPED;
8443 MUTEX_UNLOCK( &stream_.mutex );
// Abort the PulseAudio stream immediately: mark it STOPPED and flush
// (discard) queued playback data with pa_simple_flush instead of draining.
// NOTE(review): `return`s, the `int pa_error;` declaration, and closing
// braces are elided from this view of the listing.
8446 void RtApiPulse::abortStream( void )
8448 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8450 if ( stream_.state == STREAM_CLOSED ) {
8451 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8452 error( RtAudioError::INVALID_USE );
8455 if ( stream_.state == STREAM_STOPPED ) {
8456 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8457 error( RtAudioError::WARNING );
8461 stream_.state = STREAM_STOPPED;
8462 MUTEX_LOCK( &stream_.mutex );
8464 if ( pah && pah->s_play ) {
8466 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8467 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8468 pa_strerror( pa_error ) << ".";
8469 errorText_ = errorStream_.str();
// Unlock before error() since SYSTEM_ERROR may throw out of this function.
8470 MUTEX_UNLOCK( &stream_.mutex );
8471 error( RtAudioError::SYSTEM_ERROR );
8476 stream_.state = STREAM_STOPPED;
8477 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the single virtual PulseAudio
// device: validate parameters, map the RtAudio format to a pa_sample_format
// (falling back to FLOAT32 with internal conversion), allocate user/device
// buffers, create the pa_simple record/playback connection, and on the first
// open spawn the callback thread (realtime-scheduled if requested).
// Returns true on success; the error path frees everything it allocated.
// NOTE(review): non-contiguous numbering — braces, `goto error;` jumps, the
// `error:` cleanup label, `pa_sample_spec ss;`, `int error;`, and several
// else branches are elided from this view of the listing.
8480 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8481 unsigned int channels, unsigned int firstChannel,
8482 unsigned int sampleRate, RtAudioFormat format,
8483 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8485 PulseAudioHandle *pah = 0;
8486 unsigned long bufferBytes = 0;
// Only device 0 exists for this backend; duplex is reached by two calls.
8489 if ( device != 0 ) return false;
8490 if ( mode != INPUT && mode != OUTPUT ) return false;
8491 if ( channels != 1 && channels != 2 ) {
8492 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8495 ss.channels = channels;
8497 if ( firstChannel != 0 ) return false;
// Accept only rates from the fixed SUPPORTED_SAMPLERATES table.
8499 bool sr_found = false;
8500 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8501 if ( sampleRate == *sr ) {
8503 stream_.sampleRate = sampleRate;
8504 ss.rate = sampleRate;
8509 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Map the requested format to a native PulseAudio one if possible.
8514 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8515 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8516 if ( format == sf->rtaudio_format ) {
8518 stream_.userFormat = sf->rtaudio_format;
8519 stream_.deviceFormat[mode] = stream_.userFormat;
8520 ss.format = sf->pa_format;
8524 if ( !sf_found ) { // Use internal data format conversion.
8525 stream_.userFormat = format;
8526 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8527 ss.format = PA_SAMPLE_FLOAT32LE;
8530 // Set other stream parameters.
8531 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8532 else stream_.userInterleaved = true;
8533 stream_.deviceInterleaved[mode] = true;
8534 stream_.nBuffers = 1;
8535 stream_.doByteSwap[mode] = false;
8536 stream_.nUserChannels[mode] = channels;
8537 stream_.nDeviceChannels[mode] = channels + firstChannel;
8538 stream_.channelOffset[mode] = 0;
8539 std::string streamName = "RtAudio";
8541 // Set flags for buffer conversion.
8542 stream_.doConvertBuffer[mode] = false;
8543 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8544 stream_.doConvertBuffer[mode] = true;
8545 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8546 stream_.doConvertBuffer[mode] = true;
8548 // Allocate necessary internal buffers.
8549 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8550 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8551 if ( stream_.userBuffer[mode] == NULL ) {
8552 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8555 stream_.bufferSize = *bufferSize;
8557 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the existing device buffer for duplex when it is already big enough.
8559 bool makeBuffer = true;
8560 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8561 if ( mode == INPUT ) {
8562 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8563 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8564 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8569 bufferBytes *= *bufferSize;
8570 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8571 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8572 if ( stream_.deviceBuffer == NULL ) {
8573 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8579 stream_.device[mode] = device;
8581 // Setup the buffer conversion information structure.
8582 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// First open for this stream: create the handle and its condition variable.
8584 if ( !stream_.apiHandle ) {
8585 PulseAudioHandle *pah = new PulseAudioHandle;
8587 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8591 stream_.apiHandle = pah;
8592 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8593 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
// Re-fetch the handle: the `pah` above shadowed the outer declaration.
8597 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8600 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8603 pa_buffer_attr buffer_attr;
8604 buffer_attr.fragsize = bufferBytes;
8605 buffer_attr.maxlength = -1;
8607 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8608 if ( !pah->s_rec ) {
8609 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8614 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8615 if ( !pah->s_play ) {
8616 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track overall mode: second direction on an open stream promotes to DUPLEX.
8624 if ( stream_.mode == UNINITIALIZED )
8625 stream_.mode = mode;
8626 else if ( stream_.mode == mode )
8629 stream_.mode = DUPLEX;
8631 if ( !stream_.callbackInfo.isRunning ) {
8632 stream_.callbackInfo.object = this;
8634 stream_.state = STREAM_STOPPED;
8635 // Set the thread attributes for joinable and realtime scheduling
8636 // priority (optional). The higher priority will only take affect
8637 // if the program is run as root or suid. Note, under Linux
8638 // processes with CAP_SYS_NICE privilege, a user can change
8639 // scheduling policy and priority (thus need not be root). See
8640 // POSIX "capabilities".
8641 pthread_attr_t attr;
8642 pthread_attr_init( &attr );
8643 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8644 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8645 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8646 stream_.callbackInfo.doRealtime = true;
8647 struct sched_param param;
8648 int priority = options->priority;
8649 int min = sched_get_priority_min( SCHED_RR );
8650 int max = sched_get_priority_max( SCHED_RR );
8651 if ( priority < min ) priority = min;
8652 else if ( priority > max ) priority = max;
8653 param.sched_priority = priority;
8655 // Set the policy BEFORE the priority. Otherwise it fails.
8656 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8657 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8658 // This is definitely required. Otherwise it fails.
8659 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8660 pthread_attr_setschedparam(&attr, &param);
8663 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8665 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8668 stream_.callbackInfo.isRunning = true;
8669 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8670 pthread_attr_destroy(&attr);
8672 // Failed. Try instead with default attributes.
8673 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8675 stream_.callbackInfo.isRunning = false;
8676 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// ---- Error cleanup path (label elided from this view): release the
// handle, user buffers, and device buffer, then report failure. ----
8685 if ( pah && stream_.callbackInfo.isRunning ) {
8686 pthread_cond_destroy( &pah->runnable_cv );
8688 stream_.apiHandle = 0;
8691 for ( int i=0; i<2; i++ ) {
8692 if ( stream_.userBuffer[i] ) {
8693 free( stream_.userBuffer[i] );
8694 stream_.userBuffer[i] = 0;
8698 if ( stream_.deviceBuffer ) {
8699 free( stream_.deviceBuffer );
8700 stream_.deviceBuffer = 0;
8703 stream_.state = STREAM_CLOSED;
8707 //******************** End of __LINUX_PULSE__ *********************//
8710 #if defined(__LINUX_OSS__)
8713 #include <sys/ioctl.h>
8716 #include <sys/soundcard.h>
8720 static void *ossCallbackHandler(void * ptr);
8722 // A structure to hold various information related to the OSS API
// Per-stream OSS state: device file descriptors for playback/capture plus a
// condition variable used by the callback thread.
// NOTE(review): several members (xrun flags, triggered, the struct keyword
// line) are elided from this view of the listing — non-contiguous numbering.
8725 int id[2]; // device ids
8728 pthread_cond_t runnable;
8731 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor: no OSS-specific initialization is required.
8734 RtApiOss :: RtApiOss()
8736 // Nothing to do here.
// Destructor: ensure the stream is torn down if the user never closed it.
8739 RtApiOss :: ~RtApiOss()
8741 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count OSS audio devices by querying SNDCTL_SYSINFO through /dev/mixer.
// Emits a WARNING (and, per the elided lines, presumably returns 0 — TODO
// confirm) if the mixer cannot be opened or the ioctl fails (OSS < 4.0).
// NOTE(review): `return 0;`, `close( mixerfd );` and braces are elided from
// this view of the listing.
8744 unsigned int RtApiOss :: getDeviceCount( void )
8746 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8747 if ( mixerfd == -1 ) {
8748 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8749 error( RtAudioError::WARNING );
8753 oss_sysinfo sysinfo;
8754 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8756 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8757 error( RtAudioError::WARNING );
8762 return sysinfo.numaudios;
// Probe one OSS device via /dev/mixer: validate the device index against
// SNDCTL_SYSINFO, fetch its oss_audioinfo, then fill in channel counts,
// native formats (from the input-format mask), and supported sample rates
// (either the device's explicit rate list or its min/max range intersected
// with the RtApi SAMPLE_RATES table). info.probed stays false on failure.
// NOTE(review): non-contiguous numbering — `return info;` statements,
// `close( mixerfd );` calls, `ainfo.dev = device;`, `info.probed = true;`,
// and various braces/else lines are elided from this view of the listing.
8765 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8767 RtAudio::DeviceInfo info;
8768 info.probed = false;
8770 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8771 if ( mixerfd == -1 ) {
8772 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8773 error( RtAudioError::WARNING );
8777 oss_sysinfo sysinfo;
8778 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8779 if ( result == -1 ) {
8781 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8782 error( RtAudioError::WARNING );
8786 unsigned nDevices = sysinfo.numaudios;
8787 if ( nDevices == 0 ) {
8789 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8790 error( RtAudioError::INVALID_USE );
8794 if ( device >= nDevices ) {
8796 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8797 error( RtAudioError::INVALID_USE );
8801 oss_audioinfo ainfo;
8803 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8805 if ( result == -1 ) {
8806 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8807 errorText_ = errorStream_.str();
8808 error( RtAudioError::WARNING );
// Channel capabilities come from the device caps bits.
8813 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8814 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8815 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8816 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8817 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8820 // Probe data formats ... do for input
8821 unsigned long mask = ainfo.iformats;
8822 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8823 info.nativeFormats |= RTAUDIO_SINT16;
8824 if ( mask & AFMT_S8 )
8825 info.nativeFormats |= RTAUDIO_SINT8;
8826 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8827 info.nativeFormats |= RTAUDIO_SINT32;
8829 if ( mask & AFMT_FLOAT )
8830 info.nativeFormats |= RTAUDIO_FLOAT32;
8832 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8833 info.nativeFormats |= RTAUDIO_SINT24;
8835 // Check that we have at least one supported format
8836 if ( info.nativeFormats == 0 ) {
8837 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8838 errorText_ = errorStream_.str();
8839 error( RtAudioError::WARNING );
8843 // Probe the supported sample rates.
8844 info.sampleRates.clear();
8845 if ( ainfo.nrates ) {
// The device reports an explicit list of rates; keep those RtAudio knows.
8846 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8847 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8848 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8849 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate up to 48 kHz as the default.
8851 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8852 info.preferredSampleRate = SAMPLE_RATES[k];
8860 // Check min and max rate values;
8861 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8862 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8863 info.sampleRates.push_back( SAMPLE_RATES[k] );
8865 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8866 info.preferredSampleRate = SAMPLE_RATES[k];
8871 if ( info.sampleRates.size() == 0 ) {
8872 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8873 errorText_ = errorStream_.str();
8874 error( RtAudioError::WARNING );
8878 info.name = ainfo.name;
8885 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8886 unsigned int firstChannel, unsigned int sampleRate,
8887 RtAudioFormat format, unsigned int *bufferSize,
8888 RtAudio::StreamOptions *options )
8890 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8891 if ( mixerfd == -1 ) {
8892 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8896 oss_sysinfo sysinfo;
8897 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8898 if ( result == -1 ) {
8900 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8904 unsigned nDevices = sysinfo.numaudios;
8905 if ( nDevices == 0 ) {
8906 // This should not happen because a check is made before this function is called.
8908 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8912 if ( device >= nDevices ) {
8913 // This should not happen because a check is made before this function is called.
8915 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8919 oss_audioinfo ainfo;
8921 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8923 if ( result == -1 ) {
8924 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8925 errorText_ = errorStream_.str();
8929 // Check if device supports input or output
8930 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8931 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8932 if ( mode == OUTPUT )
8933 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8935 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8936 errorText_ = errorStream_.str();
8941 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8942 if ( mode == OUTPUT )
8944 else { // mode == INPUT
8945 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8946 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8947 close( handle->id[0] );
8949 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8950 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8951 errorText_ = errorStream_.str();
8954 // Check that the number previously set channels is the same.
8955 if ( stream_.nUserChannels[0] != channels ) {
8956 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8957 errorText_ = errorStream_.str();
8966 // Set exclusive access if specified.
8967 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8969 // Try to open the device.
8971 fd = open( ainfo.devnode, flags, 0 );
8973 if ( errno == EBUSY )
8974 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8976 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8977 errorText_ = errorStream_.str();
8981 // For duplex operation, specifically set this mode (this doesn't seem to work).
8983 if ( flags | O_RDWR ) {
8984 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8985 if ( result == -1) {
8986 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8987 errorText_ = errorStream_.str();
8993 // Check the device channel support.
8994 stream_.nUserChannels[mode] = channels;
8995 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8997 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8998 errorText_ = errorStream_.str();
9002 // Set the number of channels.
9003 int deviceChannels = channels + firstChannel;
9004 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9005 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9007 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9008 errorText_ = errorStream_.str();
9011 stream_.nDeviceChannels[mode] = deviceChannels;
9013 // Get the data format mask
9015 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9016 if ( result == -1 ) {
9018 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9019 errorText_ = errorStream_.str();
9023 // Determine how to set the device format.
9024 stream_.userFormat = format;
9025 int deviceFormat = -1;
9026 stream_.doByteSwap[mode] = false;
9027 if ( format == RTAUDIO_SINT8 ) {
9028 if ( mask & AFMT_S8 ) {
9029 deviceFormat = AFMT_S8;
9030 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9033 else if ( format == RTAUDIO_SINT16 ) {
9034 if ( mask & AFMT_S16_NE ) {
9035 deviceFormat = AFMT_S16_NE;
9036 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9038 else if ( mask & AFMT_S16_OE ) {
9039 deviceFormat = AFMT_S16_OE;
9040 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9041 stream_.doByteSwap[mode] = true;
9044 else if ( format == RTAUDIO_SINT24 ) {
9045 if ( mask & AFMT_S24_NE ) {
9046 deviceFormat = AFMT_S24_NE;
9047 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9049 else if ( mask & AFMT_S24_OE ) {
9050 deviceFormat = AFMT_S24_OE;
9051 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9052 stream_.doByteSwap[mode] = true;
9055 else if ( format == RTAUDIO_SINT32 ) {
9056 if ( mask & AFMT_S32_NE ) {
9057 deviceFormat = AFMT_S32_NE;
9058 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9060 else if ( mask & AFMT_S32_OE ) {
9061 deviceFormat = AFMT_S32_OE;
9062 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9063 stream_.doByteSwap[mode] = true;
9067 if ( deviceFormat == -1 ) {
9068 // The user requested format is not natively supported by the device.
9069 if ( mask & AFMT_S16_NE ) {
9070 deviceFormat = AFMT_S16_NE;
9071 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9073 else if ( mask & AFMT_S32_NE ) {
9074 deviceFormat = AFMT_S32_NE;
9075 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9077 else if ( mask & AFMT_S24_NE ) {
9078 deviceFormat = AFMT_S24_NE;
9079 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9081 else if ( mask & AFMT_S16_OE ) {
9082 deviceFormat = AFMT_S16_OE;
9083 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9084 stream_.doByteSwap[mode] = true;
9086 else if ( mask & AFMT_S32_OE ) {
9087 deviceFormat = AFMT_S32_OE;
9088 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9089 stream_.doByteSwap[mode] = true;
9091 else if ( mask & AFMT_S24_OE ) {
9092 deviceFormat = AFMT_S24_OE;
9093 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9094 stream_.doByteSwap[mode] = true;
9096 else if ( mask & AFMT_S8) {
9097 deviceFormat = AFMT_S8;
9098 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9102 if ( stream_.deviceFormat[mode] == 0 ) {
9103 // This really shouldn't happen ...
9105 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9106 errorText_ = errorStream_.str();
9110 // Set the data format.
9111 int temp = deviceFormat;
9112 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9113 if ( result == -1 || deviceFormat != temp ) {
9115 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9116 errorText_ = errorStream_.str();
9120 // Attempt to set the buffer size. According to OSS, the minimum
9121 // number of buffers is two. The supposed minimum buffer size is 16
9122 // bytes, so that will be our lower bound. The argument to this
9123 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9124 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9125 // We'll check the actual value used near the end of the setup
9127 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9128 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9130 if ( options ) buffers = options->numberOfBuffers;
9131 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9132 if ( buffers < 2 ) buffers = 3;
9133 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9134 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9135 if ( result == -1 ) {
9137 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9138 errorText_ = errorStream_.str();
9141 stream_.nBuffers = buffers;
9143 // Save buffer size (in sample frames).
9144 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9145 stream_.bufferSize = *bufferSize;
9147 // Set the sample rate.
9148 int srate = sampleRate;
9149 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9150 if ( result == -1 ) {
9152 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9153 errorText_ = errorStream_.str();
9157 // Verify the sample rate setup worked.
9158 if ( abs( srate - (int)sampleRate ) > 100 ) {
9160 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9161 errorText_ = errorStream_.str();
9164 stream_.sampleRate = sampleRate;
9166 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9167 // We're doing duplex setup here.
9168 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9169 stream_.nDeviceChannels[0] = deviceChannels;
9172 // Set interleaving parameters.
9173 stream_.userInterleaved = true;
9174 stream_.deviceInterleaved[mode] = true;
9175 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9176 stream_.userInterleaved = false;
9178 // Set flags for buffer conversion
9179 stream_.doConvertBuffer[mode] = false;
9180 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9181 stream_.doConvertBuffer[mode] = true;
9182 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9183 stream_.doConvertBuffer[mode] = true;
9184 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9185 stream_.nUserChannels[mode] > 1 )
9186 stream_.doConvertBuffer[mode] = true;
9188 // Allocate the stream handles if necessary and then save.
9189 if ( stream_.apiHandle == 0 ) {
9191 handle = new OssHandle;
9193 catch ( std::bad_alloc& ) {
9194 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9198 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9199 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9203 stream_.apiHandle = (void *) handle;
9206 handle = (OssHandle *) stream_.apiHandle;
9208 handle->id[mode] = fd;
9210 // Allocate necessary internal buffers.
9211 unsigned long bufferBytes;
9212 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9213 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9214 if ( stream_.userBuffer[mode] == NULL ) {
9215 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9219 if ( stream_.doConvertBuffer[mode] ) {
9221 bool makeBuffer = true;
9222 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9223 if ( mode == INPUT ) {
9224 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9225 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9226 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9231 bufferBytes *= *bufferSize;
9232 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9233 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9234 if ( stream_.deviceBuffer == NULL ) {
9235 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9241 stream_.device[mode] = device;
9242 stream_.state = STREAM_STOPPED;
9244 // Setup the buffer conversion information structure.
9245 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9247 // Setup thread if necessary.
9248 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9249 // We had already set up an output stream.
9250 stream_.mode = DUPLEX;
9251 if ( stream_.device[0] == device ) handle->id[0] = fd;
9254 stream_.mode = mode;
9256 // Setup callback thread.
9257 stream_.callbackInfo.object = (void *) this;
9259 // Set the thread attributes for joinable and realtime scheduling
9260 // priority. The higher priority will only take affect if the
9261 // program is run as root or suid.
9262 pthread_attr_t attr;
9263 pthread_attr_init( &attr );
9264 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9265 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9266 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9267 stream_.callbackInfo.doRealtime = true;
9268 struct sched_param param;
9269 int priority = options->priority;
9270 int min = sched_get_priority_min( SCHED_RR );
9271 int max = sched_get_priority_max( SCHED_RR );
9272 if ( priority < min ) priority = min;
9273 else if ( priority > max ) priority = max;
9274 param.sched_priority = priority;
9276 // Set the policy BEFORE the priority. Otherwise it fails.
9277 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9278 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9279 // This is definitely required. Otherwise it fails.
9280 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9281 pthread_attr_setschedparam(&attr, ¶m);
9284 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9286 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9289 stream_.callbackInfo.isRunning = true;
9290 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9291 pthread_attr_destroy( &attr );
9293 // Failed. Try instead with default attributes.
9294 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9296 stream_.callbackInfo.isRunning = false;
9297 errorText_ = "RtApiOss::error creating callback thread!";
9307 pthread_cond_destroy( &handle->runnable );
9308 if ( handle->id[0] ) close( handle->id[0] );
9309 if ( handle->id[1] ) close( handle->id[1] );
9311 stream_.apiHandle = 0;
9314 for ( int i=0; i<2; i++ ) {
9315 if ( stream_.userBuffer[i] ) {
9316 free( stream_.userBuffer[i] );
9317 stream_.userBuffer[i] = 0;
9321 if ( stream_.deviceBuffer ) {
9322 free( stream_.deviceBuffer );
9323 stream_.deviceBuffer = 0;
9326 stream_.state = STREAM_CLOSED;
9330 void RtApiOss :: closeStream()
9332 if ( stream_.state == STREAM_CLOSED ) {
9333 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9334 error( RtAudioError::WARNING );
9338 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9339 stream_.callbackInfo.isRunning = false;
9340 MUTEX_LOCK( &stream_.mutex );
9341 if ( stream_.state == STREAM_STOPPED )
9342 pthread_cond_signal( &handle->runnable );
9343 MUTEX_UNLOCK( &stream_.mutex );
9344 pthread_join( stream_.callbackInfo.thread, NULL );
9346 if ( stream_.state == STREAM_RUNNING ) {
9347 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9348 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9350 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9351 stream_.state = STREAM_STOPPED;
9355 pthread_cond_destroy( &handle->runnable );
9356 if ( handle->id[0] ) close( handle->id[0] );
9357 if ( handle->id[1] ) close( handle->id[1] );
9359 stream_.apiHandle = 0;
9362 for ( int i=0; i<2; i++ ) {
9363 if ( stream_.userBuffer[i] ) {
9364 free( stream_.userBuffer[i] );
9365 stream_.userBuffer[i] = 0;
9369 if ( stream_.deviceBuffer ) {
9370 free( stream_.deviceBuffer );
9371 stream_.deviceBuffer = 0;
9374 stream_.mode = UNINITIALIZED;
9375 stream_.state = STREAM_CLOSED;
9378 void RtApiOss :: startStream()
9381 if ( stream_.state == STREAM_RUNNING ) {
9382 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9383 error( RtAudioError::WARNING );
9387 MUTEX_LOCK( &stream_.mutex );
9389 stream_.state = STREAM_RUNNING;
9391 // No need to do anything else here ... OSS automatically starts
9392 // when fed samples.
9394 MUTEX_UNLOCK( &stream_.mutex );
9396 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9397 pthread_cond_signal( &handle->runnable );
9400 void RtApiOss :: stopStream()
9403 if ( stream_.state == STREAM_STOPPED ) {
9404 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9405 error( RtAudioError::WARNING );
9409 MUTEX_LOCK( &stream_.mutex );
9411 // The state might change while waiting on a mutex.
9412 if ( stream_.state == STREAM_STOPPED ) {
9413 MUTEX_UNLOCK( &stream_.mutex );
9418 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9419 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9421 // Flush the output with zeros a few times.
9424 RtAudioFormat format;
9426 if ( stream_.doConvertBuffer[0] ) {
9427 buffer = stream_.deviceBuffer;
9428 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9429 format = stream_.deviceFormat[0];
9432 buffer = stream_.userBuffer[0];
9433 samples = stream_.bufferSize * stream_.nUserChannels[0];
9434 format = stream_.userFormat;
9437 memset( buffer, 0, samples * formatBytes(format) );
9438 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9439 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9440 if ( result == -1 ) {
9441 errorText_ = "RtApiOss::stopStream: audio write error.";
9442 error( RtAudioError::WARNING );
9446 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9447 if ( result == -1 ) {
9448 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9449 errorText_ = errorStream_.str();
9452 handle->triggered = false;
9455 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9456 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9457 if ( result == -1 ) {
9458 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9459 errorText_ = errorStream_.str();
9465 stream_.state = STREAM_STOPPED;
9466 MUTEX_UNLOCK( &stream_.mutex );
9468 if ( result != -1 ) return;
9469 error( RtAudioError::SYSTEM_ERROR );
9472 void RtApiOss :: abortStream()
9475 if ( stream_.state == STREAM_STOPPED ) {
9476 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9477 error( RtAudioError::WARNING );
9481 MUTEX_LOCK( &stream_.mutex );
9483 // The state might change while waiting on a mutex.
9484 if ( stream_.state == STREAM_STOPPED ) {
9485 MUTEX_UNLOCK( &stream_.mutex );
9490 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9491 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9492 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9493 if ( result == -1 ) {
9494 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9495 errorText_ = errorStream_.str();
9498 handle->triggered = false;
9501 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9502 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9503 if ( result == -1 ) {
9504 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9505 errorText_ = errorStream_.str();
9511 stream_.state = STREAM_STOPPED;
9512 MUTEX_UNLOCK( &stream_.mutex );
9514 if ( result != -1 ) return;
9515 error( RtAudioError::SYSTEM_ERROR );
9518 void RtApiOss :: callbackEvent()
9520 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9521 if ( stream_.state == STREAM_STOPPED ) {
9522 MUTEX_LOCK( &stream_.mutex );
9523 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9524 if ( stream_.state != STREAM_RUNNING ) {
9525 MUTEX_UNLOCK( &stream_.mutex );
9528 MUTEX_UNLOCK( &stream_.mutex );
9531 if ( stream_.state == STREAM_CLOSED ) {
9532 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9533 error( RtAudioError::WARNING );
9537 // Invoke user callback to get fresh output data.
9538 int doStopStream = 0;
9539 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9540 double streamTime = getStreamTime();
9541 RtAudioStreamStatus status = 0;
9542 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9543 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9544 handle->xrun[0] = false;
9546 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9547 status |= RTAUDIO_INPUT_OVERFLOW;
9548 handle->xrun[1] = false;
9550 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9551 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9552 if ( doStopStream == 2 ) {
9553 this->abortStream();
9557 MUTEX_LOCK( &stream_.mutex );
9559 // The state might change while waiting on a mutex.
9560 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9565 RtAudioFormat format;
9567 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9569 // Setup parameters and do buffer conversion if necessary.
9570 if ( stream_.doConvertBuffer[0] ) {
9571 buffer = stream_.deviceBuffer;
9572 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9573 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9574 format = stream_.deviceFormat[0];
9577 buffer = stream_.userBuffer[0];
9578 samples = stream_.bufferSize * stream_.nUserChannels[0];
9579 format = stream_.userFormat;
9582 // Do byte swapping if necessary.
9583 if ( stream_.doByteSwap[0] )
9584 byteSwapBuffer( buffer, samples, format );
9586 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9588 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9589 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9590 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9591 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9592 handle->triggered = true;
9595 // Write samples to device.
9596 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9598 if ( result == -1 ) {
9599 // We'll assume this is an underrun, though there isn't a
9600 // specific means for determining that.
9601 handle->xrun[0] = true;
9602 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9603 error( RtAudioError::WARNING );
9604 // Continue on to input section.
9608 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9610 // Setup parameters.
9611 if ( stream_.doConvertBuffer[1] ) {
9612 buffer = stream_.deviceBuffer;
9613 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9614 format = stream_.deviceFormat[1];
9617 buffer = stream_.userBuffer[1];
9618 samples = stream_.bufferSize * stream_.nUserChannels[1];
9619 format = stream_.userFormat;
9622 // Read samples from device.
9623 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9625 if ( result == -1 ) {
9626 // We'll assume this is an overrun, though there isn't a
9627 // specific means for determining that.
9628 handle->xrun[1] = true;
9629 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9630 error( RtAudioError::WARNING );
9634 // Do byte swapping if necessary.
9635 if ( stream_.doByteSwap[1] )
9636 byteSwapBuffer( buffer, samples, format );
9638 // Do buffer conversion if necessary.
9639 if ( stream_.doConvertBuffer[1] )
9640 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9644 MUTEX_UNLOCK( &stream_.mutex );
9646 RtApi::tickStreamTime();
9647 if ( doStopStream == 1 ) this->stopStream();
9650 static void *ossCallbackHandler( void *ptr )
9652 CallbackInfo *info = (CallbackInfo *) ptr;
9653 RtApiOss *object = (RtApiOss *) info->object;
9654 bool *isRunning = &info->isRunning;
9656 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9657 if (info->doRealtime) {
9658 std::cerr << "RtAudio oss: " <<
9659 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9660 "running realtime scheduling" << std::endl;
9664 while ( *isRunning == true ) {
9665 pthread_testcancel();
9666 object->callbackEvent();
9669 pthread_exit( NULL );
9672 //******************** End of __LINUX_OSS__ *********************//
9676 // *************************************************** //
9678 // Protected common (OS-independent) RtAudio methods.
9680 // *************************************************** //
9682 // This method can be modified to control the behavior of error
9683 // message printing.
9684 void RtApi :: error( RtAudioError::Type type )
9686 errorStream_.str(""); // clear the ostringstream
9688 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9689 if ( errorCallback ) {
9690 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9692 if ( firstErrorOccurred_ )
9695 firstErrorOccurred_ = true;
9696 const std::string errorMessage = errorText_;
9698 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9699 stream_.callbackInfo.isRunning = false; // exit from the thread
9703 errorCallback( type, errorMessage );
9704 firstErrorOccurred_ = false;
9708 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9709 std::cerr << '\n' << errorText_ << "\n\n";
9710 else if ( type != RtAudioError::WARNING )
9711 throw( RtAudioError( errorText_, type ) );
9714 void RtApi :: verifyStream()
9716 if ( stream_.state == STREAM_CLOSED ) {
9717 errorText_ = "RtApi:: a stream is not open!";
9718 error( RtAudioError::INVALID_USE );
9722 void RtApi :: clearStreamInfo()
9724 stream_.mode = UNINITIALIZED;
9725 stream_.state = STREAM_CLOSED;
9726 stream_.sampleRate = 0;
9727 stream_.bufferSize = 0;
9728 stream_.nBuffers = 0;
9729 stream_.userFormat = 0;
9730 stream_.userInterleaved = true;
9731 stream_.streamTime = 0.0;
9732 stream_.apiHandle = 0;
9733 stream_.deviceBuffer = 0;
9734 stream_.callbackInfo.callback = 0;
9735 stream_.callbackInfo.userData = 0;
9736 stream_.callbackInfo.isRunning = false;
9737 stream_.callbackInfo.errorCallback = 0;
9738 for ( int i=0; i<2; i++ ) {
9739 stream_.device[i] = 11111;
9740 stream_.doConvertBuffer[i] = false;
9741 stream_.deviceInterleaved[i] = true;
9742 stream_.doByteSwap[i] = false;
9743 stream_.nUserChannels[i] = 0;
9744 stream_.nDeviceChannels[i] = 0;
9745 stream_.channelOffset[i] = 0;
9746 stream_.deviceFormat[i] = 0;
9747 stream_.latency[i] = 0;
9748 stream_.userBuffer[i] = 0;
9749 stream_.convertInfo[i].channels = 0;
9750 stream_.convertInfo[i].inJump = 0;
9751 stream_.convertInfo[i].outJump = 0;
9752 stream_.convertInfo[i].inFormat = 0;
9753 stream_.convertInfo[i].outFormat = 0;
9754 stream_.convertInfo[i].inOffset.clear();
9755 stream_.convertInfo[i].outOffset.clear();
9759 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9761 if ( format == RTAUDIO_SINT16 )
9763 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9765 else if ( format == RTAUDIO_FLOAT64 )
9767 else if ( format == RTAUDIO_SINT24 )
9769 else if ( format == RTAUDIO_SINT8 )
9772 errorText_ = "RtApi::formatBytes: undefined format.";
9773 error( RtAudioError::WARNING );
9778 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9780 if ( mode == INPUT ) { // convert device to user buffer
9781 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9782 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9783 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9784 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9786 else { // convert user to device buffer
9787 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9788 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9789 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9790 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9793 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9794 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9796 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9798 // Set up the interleave/deinterleave offsets.
9799 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9800 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9801 ( mode == INPUT && stream_.userInterleaved ) ) {
9802 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9803 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9804 stream_.convertInfo[mode].outOffset.push_back( k );
9805 stream_.convertInfo[mode].inJump = 1;
9809 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9810 stream_.convertInfo[mode].inOffset.push_back( k );
9811 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9812 stream_.convertInfo[mode].outJump = 1;
9816 else { // no (de)interleaving
9817 if ( stream_.userInterleaved ) {
9818 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9819 stream_.convertInfo[mode].inOffset.push_back( k );
9820 stream_.convertInfo[mode].outOffset.push_back( k );
9824 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9825 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9826 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9827 stream_.convertInfo[mode].inJump = 1;
9828 stream_.convertInfo[mode].outJump = 1;
9833 // Add channel offset.
9834 if ( firstChannel > 0 ) {
9835 if ( stream_.deviceInterleaved[mode] ) {
9836 if ( mode == OUTPUT ) {
9837 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9838 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9841 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9842 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9846 if ( mode == OUTPUT ) {
9847 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9848 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9851 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9852 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9858 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9860 // This function does format conversion, input/output channel compensation, and
9861 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9862 // the lower three bytes of a 32-bit integer.
9864 // Clear our device buffer when in/out duplex device channels are different
9865 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9866 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9867 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9870 if (info.outFormat == RTAUDIO_FLOAT64) {
9872 Float64 *out = (Float64 *)outBuffer;
9874 if (info.inFormat == RTAUDIO_SINT8) {
9875 signed char *in = (signed char *)inBuffer;
9876 scale = 1.0 / 127.5;
9877 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9878 for (j=0; j<info.channels; j++) {
9879 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9880 out[info.outOffset[j]] += 0.5;
9881 out[info.outOffset[j]] *= scale;
9884 out += info.outJump;
9887 else if (info.inFormat == RTAUDIO_SINT16) {
9888 Int16 *in = (Int16 *)inBuffer;
9889 scale = 1.0 / 32767.5;
9890 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9891 for (j=0; j<info.channels; j++) {
9892 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9893 out[info.outOffset[j]] += 0.5;
9894 out[info.outOffset[j]] *= scale;
9897 out += info.outJump;
9900 else if (info.inFormat == RTAUDIO_SINT24) {
9901 Int24 *in = (Int24 *)inBuffer;
9902 scale = 1.0 / 8388607.5;
9903 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9904 for (j=0; j<info.channels; j++) {
9905 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9906 out[info.outOffset[j]] += 0.5;
9907 out[info.outOffset[j]] *= scale;
9910 out += info.outJump;
9913 else if (info.inFormat == RTAUDIO_SINT32) {
9914 Int32 *in = (Int32 *)inBuffer;
9915 scale = 1.0 / 2147483647.5;
9916 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9917 for (j=0; j<info.channels; j++) {
9918 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9919 out[info.outOffset[j]] += 0.5;
9920 out[info.outOffset[j]] *= scale;
9923 out += info.outJump;
9926 else if (info.inFormat == RTAUDIO_FLOAT32) {
9927 Float32 *in = (Float32 *)inBuffer;
9928 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9929 for (j=0; j<info.channels; j++) {
9930 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9933 out += info.outJump;
9936 else if (info.inFormat == RTAUDIO_FLOAT64) {
9937 // Channel compensation and/or (de)interleaving only.
9938 Float64 *in = (Float64 *)inBuffer;
9939 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9940 for (j=0; j<info.channels; j++) {
9941 out[info.outOffset[j]] = in[info.inOffset[j]];
9944 out += info.outJump;
9948 else if (info.outFormat == RTAUDIO_FLOAT32) {
9950 Float32 *out = (Float32 *)outBuffer;
9952 if (info.inFormat == RTAUDIO_SINT8) {
9953 signed char *in = (signed char *)inBuffer;
9954 scale = (Float32) ( 1.0 / 127.5 );
9955 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9956 for (j=0; j<info.channels; j++) {
9957 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9958 out[info.outOffset[j]] += 0.5;
9959 out[info.outOffset[j]] *= scale;
9962 out += info.outJump;
9965 else if (info.inFormat == RTAUDIO_SINT16) {
9966 Int16 *in = (Int16 *)inBuffer;
9967 scale = (Float32) ( 1.0 / 32767.5 );
9968 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9969 for (j=0; j<info.channels; j++) {
9970 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9971 out[info.outOffset[j]] += 0.5;
9972 out[info.outOffset[j]] *= scale;
9975 out += info.outJump;
9978 else if (info.inFormat == RTAUDIO_SINT24) {
9979 Int24 *in = (Int24 *)inBuffer;
9980 scale = (Float32) ( 1.0 / 8388607.5 );
9981 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9982 for (j=0; j<info.channels; j++) {
9983 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9984 out[info.outOffset[j]] += 0.5;
9985 out[info.outOffset[j]] *= scale;
9988 out += info.outJump;
9991 else if (info.inFormat == RTAUDIO_SINT32) {
9992 Int32 *in = (Int32 *)inBuffer;
9993 scale = (Float32) ( 1.0 / 2147483647.5 );
9994 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9995 for (j=0; j<info.channels; j++) {
9996 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9997 out[info.outOffset[j]] += 0.5;
9998 out[info.outOffset[j]] *= scale;
10001 out += info.outJump;
10004 else if (info.inFormat == RTAUDIO_FLOAT32) {
10005 // Channel compensation and/or (de)interleaving only.
10006 Float32 *in = (Float32 *)inBuffer;
10007 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10008 for (j=0; j<info.channels; j++) {
10009 out[info.outOffset[j]] = in[info.inOffset[j]];
10012 out += info.outJump;
10015 else if (info.inFormat == RTAUDIO_FLOAT64) {
10016 Float64 *in = (Float64 *)inBuffer;
10017 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10018 for (j=0; j<info.channels; j++) {
10019 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10022 out += info.outJump;
10026 else if (info.outFormat == RTAUDIO_SINT32) {
10027 Int32 *out = (Int32 *)outBuffer;
10028 if (info.inFormat == RTAUDIO_SINT8) {
10029 signed char *in = (signed char *)inBuffer;
10030 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10031 for (j=0; j<info.channels; j++) {
10032 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10033 out[info.outOffset[j]] <<= 24;
10036 out += info.outJump;
10039 else if (info.inFormat == RTAUDIO_SINT16) {
10040 Int16 *in = (Int16 *)inBuffer;
10041 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10042 for (j=0; j<info.channels; j++) {
10043 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10044 out[info.outOffset[j]] <<= 16;
10047 out += info.outJump;
10050 else if (info.inFormat == RTAUDIO_SINT24) {
10051 Int24 *in = (Int24 *)inBuffer;
10052 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10053 for (j=0; j<info.channels; j++) {
10054 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10055 out[info.outOffset[j]] <<= 8;
10058 out += info.outJump;
10061 else if (info.inFormat == RTAUDIO_SINT32) {
10062 // Channel compensation and/or (de)interleaving only.
10063 Int32 *in = (Int32 *)inBuffer;
10064 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10065 for (j=0; j<info.channels; j++) {
10066 out[info.outOffset[j]] = in[info.inOffset[j]];
10069 out += info.outJump;
10072 else if (info.inFormat == RTAUDIO_FLOAT32) {
10073 Float32 *in = (Float32 *)inBuffer;
10074 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10075 for (j=0; j<info.channels; j++) {
10076 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10079 out += info.outJump;
10082 else if (info.inFormat == RTAUDIO_FLOAT64) {
10083 Float64 *in = (Float64 *)inBuffer;
10084 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10085 for (j=0; j<info.channels; j++) {
10086 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10089 out += info.outJump;
10093 else if (info.outFormat == RTAUDIO_SINT24) {
10094 Int24 *out = (Int24 *)outBuffer;
10095 if (info.inFormat == RTAUDIO_SINT8) {
10096 signed char *in = (signed char *)inBuffer;
10097 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10098 for (j=0; j<info.channels; j++) {
10099 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10100 //out[info.outOffset[j]] <<= 16;
10103 out += info.outJump;
10106 else if (info.inFormat == RTAUDIO_SINT16) {
10107 Int16 *in = (Int16 *)inBuffer;
10108 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10109 for (j=0; j<info.channels; j++) {
10110 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10111 //out[info.outOffset[j]] <<= 8;
10114 out += info.outJump;
10117 else if (info.inFormat == RTAUDIO_SINT24) {
10118 // Channel compensation and/or (de)interleaving only.
10119 Int24 *in = (Int24 *)inBuffer;
10120 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10121 for (j=0; j<info.channels; j++) {
10122 out[info.outOffset[j]] = in[info.inOffset[j]];
10125 out += info.outJump;
10128 else if (info.inFormat == RTAUDIO_SINT32) {
10129 Int32 *in = (Int32 *)inBuffer;
10130 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10131 for (j=0; j<info.channels; j++) {
10132 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10133 //out[info.outOffset[j]] >>= 8;
10136 out += info.outJump;
10139 else if (info.inFormat == RTAUDIO_FLOAT32) {
10140 Float32 *in = (Float32 *)inBuffer;
10141 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10142 for (j=0; j<info.channels; j++) {
10143 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10146 out += info.outJump;
10149 else if (info.inFormat == RTAUDIO_FLOAT64) {
10150 Float64 *in = (Float64 *)inBuffer;
10151 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10152 for (j=0; j<info.channels; j++) {
10153 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10156 out += info.outJump;
10160 else if (info.outFormat == RTAUDIO_SINT16) {
10161 Int16 *out = (Int16 *)outBuffer;
10162 if (info.inFormat == RTAUDIO_SINT8) {
10163 signed char *in = (signed char *)inBuffer;
10164 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10165 for (j=0; j<info.channels; j++) {
10166 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10167 out[info.outOffset[j]] <<= 8;
10170 out += info.outJump;
10173 else if (info.inFormat == RTAUDIO_SINT16) {
10174 // Channel compensation and/or (de)interleaving only.
10175 Int16 *in = (Int16 *)inBuffer;
10176 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10177 for (j=0; j<info.channels; j++) {
10178 out[info.outOffset[j]] = in[info.inOffset[j]];
10181 out += info.outJump;
10184 else if (info.inFormat == RTAUDIO_SINT24) {
10185 Int24 *in = (Int24 *)inBuffer;
10186 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10187 for (j=0; j<info.channels; j++) {
10188 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10191 out += info.outJump;
10194 else if (info.inFormat == RTAUDIO_SINT32) {
10195 Int32 *in = (Int32 *)inBuffer;
10196 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10197 for (j=0; j<info.channels; j++) {
10198 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10201 out += info.outJump;
10204 else if (info.inFormat == RTAUDIO_FLOAT32) {
10205 Float32 *in = (Float32 *)inBuffer;
10206 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10207 for (j=0; j<info.channels; j++) {
10208 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10211 out += info.outJump;
10214 else if (info.inFormat == RTAUDIO_FLOAT64) {
10215 Float64 *in = (Float64 *)inBuffer;
10216 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10217 for (j=0; j<info.channels; j++) {
10218 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10221 out += info.outJump;
10225 else if (info.outFormat == RTAUDIO_SINT8) {
10226 signed char *out = (signed char *)outBuffer;
10227 if (info.inFormat == RTAUDIO_SINT8) {
10228 // Channel compensation and/or (de)interleaving only.
10229 signed char *in = (signed char *)inBuffer;
10230 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10231 for (j=0; j<info.channels; j++) {
10232 out[info.outOffset[j]] = in[info.inOffset[j]];
10235 out += info.outJump;
10238 if (info.inFormat == RTAUDIO_SINT16) {
10239 Int16 *in = (Int16 *)inBuffer;
10240 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10241 for (j=0; j<info.channels; j++) {
10242 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10245 out += info.outJump;
10248 else if (info.inFormat == RTAUDIO_SINT24) {
10249 Int24 *in = (Int24 *)inBuffer;
10250 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10251 for (j=0; j<info.channels; j++) {
10252 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10255 out += info.outJump;
10258 else if (info.inFormat == RTAUDIO_SINT32) {
10259 Int32 *in = (Int32 *)inBuffer;
10260 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10261 for (j=0; j<info.channels; j++) {
10262 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10265 out += info.outJump;
10268 else if (info.inFormat == RTAUDIO_FLOAT32) {
10269 Float32 *in = (Float32 *)inBuffer;
10270 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10271 for (j=0; j<info.channels; j++) {
10272 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10275 out += info.outJump;
10278 else if (info.inFormat == RTAUDIO_FLOAT64) {
10279 Float64 *in = (Float64 *)inBuffer;
10280 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10281 for (j=0; j<info.channels; j++) {
10282 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10285 out += info.outJump;
10291 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10292 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10293 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10295 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10301 if ( format == RTAUDIO_SINT16 ) {
10302 for ( unsigned int i=0; i<samples; i++ ) {
10303 // Swap 1st and 2nd bytes.
10308 // Increment 2 bytes.
10312 else if ( format == RTAUDIO_SINT32 ||
10313 format == RTAUDIO_FLOAT32 ) {
10314 for ( unsigned int i=0; i<samples; i++ ) {
10315 // Swap 1st and 4th bytes.
10320 // Swap 2nd and 3rd bytes.
10326 // Increment 3 more bytes.
10330 else if ( format == RTAUDIO_SINT24 ) {
10331 for ( unsigned int i=0; i<samples; i++ ) {
10332 // Swap 1st and 3rd bytes.
10337 // Increment 2 more bytes.
10341 else if ( format == RTAUDIO_FLOAT64 ) {
10342 for ( unsigned int i=0; i<samples; i++ ) {
10343 // Swap 1st and 8th bytes
10348 // Swap 2nd and 7th bytes
10354 // Swap 3rd and 6th bytes
10360 // Swap 4th and 5th bytes
10366 // Increment 5 more bytes.
10372 // Indentation settings for Vim and Emacs
10374 // Local Variables:
10375 // c-basic-offset: 2
10376 // indent-tabs-mode: nil
10379 // vim: et sts=2 sw=2