1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
52 // Static variable definitions.
// Candidate sample rates (in Hz) probed by backends when a device reports a
// continuous supported range rather than discrete rates.
// MAX_SAMPLE_RATES must equal the number of entries in SAMPLE_RATES (14).
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Portable mutex abstraction: MUTEX_* macros map onto Win32 critical
// sections on Windows builds and pthread mutexes on POSIX builds.
59 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
60 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
61 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
62 #define MUTEX_LOCK(A) EnterCriticalSection(A)
63 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Narrow-string overload: the input is assumed to already be in a
// byte encoding acceptable to std::string; it is copied verbatim.
67 static std::string convertCharPointerToStdString(const char *text)
69 return std::string(text);
// Wide-string overload: WideCharToMultiByte is called twice — first with a
// NULL output buffer to obtain the required UTF-8 byte count (which includes
// the terminating null, hence the length-1 when sizing the std::string),
// then again to perform the actual conversion into the string's storage.
72 static std::string convertCharPointerToStdString(const wchar_t *text)
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// NOTE(review): the two definitions below appear to belong to a fallback
// (#else) branch for builds with no compiled audio API — the abs(*A) calls
// are no-op placeholders so the macros still expand to valid expressions.
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
// Return the RtAudio library version string (the RTAUDIO_VERSION macro,
// e.g. "5.0.0" per the header comment above).
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
// Fill 'apis' with the list of APIs compiled into this build.
// The push order below is deliberate: it defines the priority in which the
// RtAudio constructor searches for a working API (JACK before PulseAudio
// before ALSA, etc.), with the dummy API always last.
102 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
106 // The order here will control the order of RtAudio's API search in
108 #if defined(__UNIX_JACK__)
109 apis.push_back( UNIX_JACK );
111 #if defined(__LINUX_PULSE__)
112 apis.push_back( LINUX_PULSE );
114 #if defined(__LINUX_ALSA__)
115 apis.push_back( LINUX_ALSA );
117 #if defined(__LINUX_OSS__)
118 apis.push_back( LINUX_OSS );
120 #if defined(__WINDOWS_ASIO__)
121 apis.push_back( WINDOWS_ASIO );
123 #if defined(__WINDOWS_WASAPI__)
124 apis.push_back( WINDOWS_WASAPI );
126 #if defined(__WINDOWS_DS__)
127 apis.push_back( WINDOWS_DS );
129 #if defined(__MACOSX_CORE__)
130 apis.push_back( MACOSX_CORE );
132 #if defined(__RTAUDIO_DUMMY__)
133 apis.push_back( RTAUDIO_DUMMY );
// Map an Api enum value to its human-readable display name (e.g. "ALSA").
// Each name is a function-local static std::string so a stable reference can
// be returned; only names for APIs compiled into this build are reachable.
// The trailing empty static is the fallback for unknown/uncompiled values.
137 const std::string &RtAudio :: getCompiledApiName( RtAudio::Api api )
139 #if defined(__UNIX_JACK__)
140 if ( api == UNIX_JACK ) {
141 static std::string name( "JACK" );
145 #if defined(__LINUX_PULSE__)
146 if ( api == LINUX_PULSE ) {
147 static std::string name( "PulseAudio" );
151 #if defined(__LINUX_ALSA__)
152 if ( api == LINUX_ALSA ) {
153 static std::string name( "ALSA" );
157 #if defined(__LINUX_OSS__)
158 if ( api == LINUX_OSS ) {
159 static std::string name( "OSS" );
163 #if defined(__WINDOWS_ASIO__)
164 if ( api == WINDOWS_ASIO ) {
165 static std::string name( "ASIO" );
169 #if defined(__WINDOWS_WASAPI__)
170 if ( api == WINDOWS_WASAPI ) {
171 static std::string name( "WASAPI" );
175 #if defined(__WINDOWS_DS__)
176 if ( api == WINDOWS_DS ) {
177 static std::string name( "DirectSound" );
181 #if defined(__MACOSX_CORE__)
182 if ( api == MACOSX_CORE ) {
183 static std::string name( "CoreAudio" );
187 #if defined(__RTAUDIO_DUMMY__)
188 if ( api == RTAUDIO_DUMMY ) {
189 static std::string name( "Dummy" );
// Fallback: empty name returned for API values with no compiled support.
193 static std::string name;
// Reverse lookup: find the Api enum whose display name matches 'name',
// comparing case-insensitively. Returns UNSPECIFIED for an empty string
// or when no compiled API name matches.
197 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
199 unsigned int api_number = RtAudio::UNSPECIFIED;
200 size_t nameLength = name.size();
202 if ( nameLength == 0 )
203 return RtAudio::UNSPECIFIED;
// Walk every enum value up to RTAUDIO_DUMMY and compare its name.
205 while ( api_number <= RtAudio::RTAUDIO_DUMMY ) {
206 const std::string &otherName =
207 getCompiledApiName((RtAudio::Api)api_number);
// Case-insensitive equality: lengths must match, then compare each
// character through tolower (cast to unsigned char avoids UB for
// negative char values).
209 bool equal = nameLength == otherName.size();
210 for ( size_t i = 0; equal && i < nameLength; ++i )
211 equal = tolower((unsigned char)name[i]) ==
212 tolower((unsigned char)otherName[i]);
215 return (RtAudio::Api)api_number;
220 return RtAudio::UNSPECIFIED;
// Instantiate the concrete RtApi subclass for the requested API, storing it
// in rtapi_. Each branch is compiled in only when the matching backend macro
// is defined; if no branch matches, rtapi_ is left unset by this function.
223 void RtAudio :: openRtApi( RtAudio::Api api )
229 #if defined(__UNIX_JACK__)
230 if ( api == UNIX_JACK )
231 rtapi_ = new RtApiJack();
233 #if defined(__LINUX_ALSA__)
234 if ( api == LINUX_ALSA )
235 rtapi_ = new RtApiAlsa();
237 #if defined(__LINUX_PULSE__)
238 if ( api == LINUX_PULSE )
239 rtapi_ = new RtApiPulse();
241 #if defined(__LINUX_OSS__)
242 if ( api == LINUX_OSS )
243 rtapi_ = new RtApiOss();
245 #if defined(__WINDOWS_ASIO__)
246 if ( api == WINDOWS_ASIO )
247 rtapi_ = new RtApiAsio();
249 #if defined(__WINDOWS_WASAPI__)
250 if ( api == WINDOWS_WASAPI )
251 rtapi_ = new RtApiWasapi();
253 #if defined(__WINDOWS_DS__)
254 if ( api == WINDOWS_DS )
255 rtapi_ = new RtApiDs();
257 #if defined(__MACOSX_CORE__)
258 if ( api == MACOSX_CORE )
259 rtapi_ = new RtApiCore();
261 #if defined(__RTAUDIO_DUMMY__)
262 if ( api == RTAUDIO_DUMMY )
263 rtapi_ = new RtApiDummy();
// Construct an RtAudio instance. If a specific API is requested, try it
// first; otherwise (or if that fails) iterate the compiled APIs in priority
// order and keep the first one that reports at least one device. Throws
// RtAudioError only in the "impossible" case where no API at all was
// compiled in (the dummy API normally guarantees at least one).
267 RtAudio :: RtAudio( RtAudio::Api api )
271 if ( api != UNSPECIFIED ) {
272 // Attempt to open the specified API.
274 if ( rtapi_ ) return;
276 // No compiled support for specified API value. Issue a debug
277 // warning and continue as if no API was specified.
278 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
281 // Iterate through the compiled APIs and return as soon as we find
282 // one with at least one device or we reach the end of the list.
283 std::vector< RtAudio::Api > apis;
284 getCompiledApi( apis );
285 for ( unsigned int i=0; i<apis.size(); i++ ) {
286 openRtApi( apis[i] );
287 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
290 if ( rtapi_ ) return;
292 // It should not be possible to get here because the preprocessor
293 // definition __RTAUDIO_DUMMY__ is automatically defined if no
294 // API-specific definitions are passed to the compiler. But just in
295 // case something weird happens, we'll thow an error.
296 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
297 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor: releases the concrete RtApi instance owned by this object
// (body truncated in this view).
300 RtAudio :: ~RtAudio()
// Thin forwarder: delegate stream opening to the selected API backend.
// All parameter validation is performed by RtApi::openStream below.
306 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
307 RtAudio::StreamParameters *inputParameters,
308 RtAudioFormat format, unsigned int sampleRate,
309 unsigned int *bufferFrames,
310 RtAudioCallback callback, void *userData,
311 RtAudio::StreamOptions *options,
312 RtAudioErrorCallback errorCallback )
314 return rtapi_->openStream( outputParameters, inputParameters, format,
315 sampleRate, bufferFrames, callback,
316 userData, options, errorCallback );
319 // *************************************************** //
321 // Public RtApi definitions (see end of file for
322 // private or protected utility functions).
324 // *************************************************** //
// NOTE(review): these statements appear to be the RtApi constructor body
// (the signature line is not visible in this chunk). It puts the stream
// bookkeeping into a known-closed state and initializes the stream mutex.
328 stream_.state = STREAM_CLOSED;
329 stream_.mode = UNINITIALIZED;
330 stream_.apiHandle = 0;
331 stream_.userBuffer[0] = 0;
332 stream_.userBuffer[1] = 0;
333 MUTEX_INITIALIZE( &stream_.mutex );
334 showWarnings_ = true;
335 firstErrorOccurred_ = false;
// NOTE(review): the line below appears to be the RtApi destructor body,
// releasing the mutex created in the constructor.
340 MUTEX_DESTROY( &stream_.mutex );
// Validate parameters and open a stream via the backend's probeDeviceOpen.
// Checks, in order: no stream already open, channel counts >= 1 on any
// non-NULL StreamParameters, at least one of input/output provided, a
// defined sample format, and valid device indices. Output is probed before
// input; if input probing fails after output succeeded, the half-open
// stream is closed before reporting the error.
343 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
344 RtAudio::StreamParameters *iParams,
345 RtAudioFormat format, unsigned int sampleRate,
346 unsigned int *bufferFrames,
347 RtAudioCallback callback, void *userData,
348 RtAudio::StreamOptions *options,
349 RtAudioErrorCallback errorCallback )
351 if ( stream_.state != STREAM_CLOSED ) {
352 errorText_ = "RtApi::openStream: a stream is already open!";
353 error( RtAudioError::INVALID_USE );
357 // Clear stream information potentially left from a previously open stream.
360 if ( oParams && oParams->nChannels < 1 ) {
361 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
362 error( RtAudioError::INVALID_USE );
366 if ( iParams && iParams->nChannels < 1 ) {
367 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
368 error( RtAudioError::INVALID_USE );
372 if ( oParams == NULL && iParams == NULL ) {
373 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
374 error( RtAudioError::INVALID_USE );
378 if ( formatBytes(format) == 0 ) {
379 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
380 error( RtAudioError::INVALID_USE );
// Validate device indices against the backend's device count.
384 unsigned int nDevices = getDeviceCount();
385 unsigned int oChannels = 0;
387 oChannels = oParams->nChannels;
388 if ( oParams->deviceId >= nDevices ) {
389 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
390 error( RtAudioError::INVALID_USE );
395 unsigned int iChannels = 0;
397 iChannels = iParams->nChannels;
398 if ( iParams->deviceId >= nDevices ) {
399 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
400 error( RtAudioError::INVALID_USE );
// Probe the output device first (subclass-implemented).
407 if ( oChannels > 0 ) {
409 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
410 sampleRate, format, bufferFrames, options );
411 if ( result == false ) {
412 error( RtAudioError::SYSTEM_ERROR );
// Then the input device; undo the output half on failure.
417 if ( iChannels > 0 ) {
419 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
420 sampleRate, format, bufferFrames, options );
421 if ( result == false ) {
422 if ( oChannels > 0 ) closeStream();
423 error( RtAudioError::SYSTEM_ERROR );
// Record the user callback info and report the actual buffer count back
// through 'options'; the stream starts in the STOPPED state.
428 stream_.callbackInfo.callback = (void *) callback;
429 stream_.callbackInfo.userData = userData;
430 stream_.callbackInfo.errorCallback = (void *) errorCallback;
432 if ( options ) options->numberOfBuffers = stream_.nBuffers;
433 stream_.state = STREAM_STOPPED;
// Base-class fallbacks: backends override these where the platform provides
// a real notion of default devices; the stubs' return values are truncated
// from this view (presumably 0).
436 unsigned int RtApi :: getDefaultInputDevice( void )
438 // Should be implemented in subclasses if possible.
442 unsigned int RtApi :: getDefaultOutputDevice( void )
444 // Should be implemented in subclasses if possible.
// Pure placeholder: every backend must provide its own closeStream.
448 void RtApi :: closeStream( void )
450 // MUST be implemented in subclasses!
// Placeholder for the backend-specific device-open logic invoked by
// RtApi::openStream above; parameters are intentionally unnamed.
454 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
455 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
456 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
457 RtAudio::StreamOptions * /*options*/ )
459 // MUST be implemented in subclasses!
// Advance the stream clock by one buffer's duration (bufferSize frames at
// sampleRate frames/sec). When gettimeofday is available, also record a
// wall-clock timestamp so getStreamTime can interpolate between ticks.
463 void RtApi :: tickStreamTime( void )
465 // Subclasses that do not provide their own implementation of
466 // getStreamTime should call this function once per buffer I/O to
467 // provide basic stream time support.
469 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
471 #if defined( HAVE_GETTIMEOFDAY )
472 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Report total stream latency in frames: output latency (latency[0]) plus
// input latency (latency[1]); a duplex stream sums both directions.
476 long RtApi :: getStreamLatency( void )
480 long totalLatency = 0;
481 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
482 totalLatency = stream_.latency[0];
483 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
484 totalLatency += stream_.latency[1];
// Return the elapsed stream time in seconds. With gettimeofday available,
// the buffer-granular streamTime is refined by adding the wall-clock time
// elapsed since the last tickStreamTime call; otherwise (or when the stream
// is not running / has not started) the raw streamTime is returned.
489 double RtApi :: getStreamTime( void )
493 #if defined( HAVE_GETTIMEOFDAY )
494 // Return a very accurate estimate of the stream time by
495 // adding in the elapsed time since the last tick.
499 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
500 return stream_.streamTime;
502 gettimeofday( &now, NULL );
503 then = stream_.lastTickTimestamp;
// Interpolate: streamTime + (now - lastTick), both converted to seconds.
504 return stream_.streamTime +
505 ((now.tv_sec + 0.000001 * now.tv_usec) -
506 (then.tv_sec + 0.000001 * then.tv_usec));
508 return stream_.streamTime;
// Overwrite the stream clock with an arbitrary time (seconds) and reset the
// interpolation timestamp so subsequent getStreamTime calls stay consistent.
512 void RtApi :: setStreamTime( double time )
517 stream_.streamTime = time;
518 #if defined( HAVE_GETTIMEOFDAY )
519 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Accessor for the sample rate of the currently open stream.
523 unsigned int RtApi :: getStreamSampleRate( void )
527 return stream_.sampleRate;
531 // *************************************************** //
533 // OS/API-specific methods.
535 // *************************************************** //
537 #if defined(__MACOSX_CORE__)
539 // The OS X CoreAudio API is designed to use a separate callback
540 // procedure for each of its audio devices. A single RtAudio duplex
541 // stream using two different devices is supported here, though it
542 // cannot be guaranteed to always behave correctly because we cannot
543 // synchronize these two callbacks.
545 // A property listener is installed for over/underrun information.
546 // However, no functionality is currently provided to allow property
547 // listeners to trigger user handlers because it is unclear what could
548 // be done if a critical stream parameter (buffer size, sample rate,
549 // device disconnect) notification arrived. The listeners entail
550 // quite a bit of extra code and most likely, a user program wouldn't
551 // be prepared for the result anyway. However, we do provide a flag
552 // to the client callback function to inform of an over/underrun.
554 // A structure to hold various information related to the CoreAudio API
// NOTE(review): the "struct CoreHandle {" line is not visible in this
// chunk; the members below are its fields. Index [0] refers to the output
// device/stream and [1] to the input device/stream throughout.
557 AudioDeviceID id[2]; // device ids
558 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
559 AudioDeviceIOProcID procId[2];
561 UInt32 iStream[2]; // device stream index (or first if using multiple)
562 UInt32 nStreams[2]; // number of streams to use
565 pthread_cond_t condition;
566 int drainCounter; // Tracks callback counts when draining
567 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default member initialization: one stream per direction, ids zeroed,
// no xrun recorded for either direction.
570 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// CoreAudio backend constructor. On OS X 10.6+ the hardware run loop must
// be explicitly configured (set to NULL here) or device property queries
// and updates are not handled correctly; failure is reported as a warning
// rather than a fatal error.
573 RtApiCore:: RtApiCore()
575 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
576 // This is a largely undocumented but absolutely necessary
577 // requirement starting with OS-X 10.6. If not called, queries and
578 // updates to various audio device properties are not handled
580 CFRunLoopRef theRunLoop = NULL;
581 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
582 kAudioObjectPropertyScopeGlobal,
583 kAudioObjectPropertyElementMaster };
584 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
585 if ( result != noErr ) {
586 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
587 error( RtAudioError::WARNING );
// CoreAudio backend destructor: close any open stream while subclass state
// is still valid (the base-class destructor runs afterwards).
592 RtApiCore :: ~RtApiCore()
594 // The subclass destructor gets called before the base class
595 // destructor, so close an existing stream before deallocating
596 // apiDeviceId memory.
597 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count audio devices by querying the size of the system object's device
// list and dividing by the per-entry size. A query failure is reported as
// a warning (the return value on that path is truncated from this view).
600 unsigned int RtApiCore :: getDeviceCount( void )
602 // Find out how many audio devices there are, if any.
604 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
605 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
606 if ( result != noErr ) {
607 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
608 error( RtAudioError::WARNING );
612 return dataSize / sizeof( AudioDeviceID );
// Return the index (into this backend's device list) of the system default
// input device. With zero or one device the answer is trivially 0.
// Otherwise: fetch the default device's AudioDeviceID, fetch the full
// device list, and linear-search for the matching index. All failures are
// reported as warnings.
615 unsigned int RtApiCore :: getDefaultInputDevice( void )
617 unsigned int nDevices = getDeviceCount();
618 if ( nDevices <= 1 ) return 0;
621 UInt32 dataSize = sizeof( AudioDeviceID );
622 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
623 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
624 if ( result != noErr ) {
625 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
626 error( RtAudioError::WARNING );
// Retrieve the full device list (VLA sized by nDevices — a compiler
// extension in C++) and find the default device's position in it.
630 dataSize *= nDevices;
631 AudioDeviceID deviceList[ nDevices ];
632 property.mSelector = kAudioHardwarePropertyDevices;
633 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
634 if ( result != noErr ) {
635 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
636 error( RtAudioError::WARNING );
640 for ( unsigned int i=0; i<nDevices; i++ )
641 if ( id == deviceList[i] ) return i;
643 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
644 error( RtAudioError::WARNING );
// Mirror of getDefaultInputDevice for the system default OUTPUT device:
// fetch the default device id, fetch the device list, return the matching
// index. Failures are reported as warnings.
648 unsigned int RtApiCore :: getDefaultOutputDevice( void )
650 unsigned int nDevices = getDeviceCount();
651 if ( nDevices <= 1 ) return 0;
654 UInt32 dataSize = sizeof( AudioDeviceID );
655 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
656 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
657 if ( result != noErr ) {
658 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
659 error( RtAudioError::WARNING );
663 dataSize = sizeof( AudioDeviceID ) * nDevices;
664 AudioDeviceID deviceList[ nDevices ];
665 property.mSelector = kAudioHardwarePropertyDevices;
666 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
667 if ( result != noErr ) {
668 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
669 error( RtAudioError::WARNING );
673 for ( unsigned int i=0; i<nDevices; i++ )
674 if ( id == deviceList[i] ) return i;
676 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
677 error( RtAudioError::WARNING );
// Build a DeviceInfo record for device index 'device': name ("manufacturer:
// device"), input/output/duplex channel counts, supported sample rates, a
// preferred rate (highest discrete rate <= 48 kHz when available), and the
// native format (always float32 for CoreAudio). All CoreAudio failures are
// reported as warnings.
681 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
683 RtAudio::DeviceInfo info;
687 unsigned int nDevices = getDeviceCount();
688 if ( nDevices == 0 ) {
689 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
690 error( RtAudioError::INVALID_USE );
694 if ( device >= nDevices ) {
695 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
696 error( RtAudioError::INVALID_USE );
// Translate the RtAudio device index into a CoreAudio AudioDeviceID.
700 AudioDeviceID deviceList[ nDevices ];
701 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
702 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
703 kAudioObjectPropertyScopeGlobal,
704 kAudioObjectPropertyElementMaster };
705 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
706 0, NULL, &dataSize, (void *) &deviceList );
707 if ( result != noErr ) {
708 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
709 error( RtAudioError::WARNING );
713 AudioDeviceID id = deviceList[ device ];
715 // Get the device name.
718 dataSize = sizeof( CFStringRef );
719 property.mSelector = kAudioObjectPropertyManufacturer;
720 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
721 if ( result != noErr ) {
722 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
723 errorText_ = errorStream_.str();
724 error( RtAudioError::WARNING );
// Convert the CFString manufacturer name to a C string. The buffer is
// sized length*3+1 to leave room for multi-byte encodings.
// NOTE(review): mname is malloc'd here; the matching free() is not
// visible in this truncated view — confirm it exists downstream.
728 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
729 int length = CFStringGetLength(cfname);
730 char *mname = (char *)malloc(length * 3 + 1);
731 #if defined( UNICODE ) || defined( _UNICODE )
732 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
734 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
736 info.name.append( (const char *)mname, strlen(mname) );
737 info.name.append( ": " );
// Append the device's own name after the "manufacturer: " prefix.
741 property.mSelector = kAudioObjectPropertyName;
742 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
743 if ( result != noErr ) {
744 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
745 errorText_ = errorStream_.str();
746 error( RtAudioError::WARNING );
// NOTE(review): as with mname above, the free() for this buffer is not
// visible in this truncated view.
750 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
751 length = CFStringGetLength(cfname);
752 char *name = (char *)malloc(length * 3 + 1);
753 #if defined( UNICODE ) || defined( _UNICODE )
754 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
756 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
758 info.name.append( (const char *)name, strlen(name) );
762 // Get the output stream "configuration".
763 AudioBufferList *bufferList = nil;
764 property.mSelector = kAudioDevicePropertyStreamConfiguration;
765 property.mScope = kAudioDevicePropertyScopeOutput;
766 // property.mElement = kAudioObjectPropertyElementWildcard;
768 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
769 if ( result != noErr || dataSize == 0 ) {
770 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
771 errorText_ = errorStream_.str();
772 error( RtAudioError::WARNING );
776 // Allocate the AudioBufferList.
777 bufferList = (AudioBufferList *) malloc( dataSize );
778 if ( bufferList == NULL ) {
779 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
780 error( RtAudioError::WARNING );
784 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
785 if ( result != noErr || dataSize == 0 ) {
787 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
788 errorText_ = errorStream_.str();
789 error( RtAudioError::WARNING );
793 // Get output channel information.
// Output channels = sum of channels over all of the device's streams.
794 unsigned int i, nStreams = bufferList->mNumberBuffers;
795 for ( i=0; i<nStreams; i++ )
796 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
799 // Get the input stream "configuration".
// Same procedure for the input scope; the buffer list is re-allocated.
800 property.mScope = kAudioDevicePropertyScopeInput;
801 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
802 if ( result != noErr || dataSize == 0 ) {
803 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
804 errorText_ = errorStream_.str();
805 error( RtAudioError::WARNING );
809 // Allocate the AudioBufferList.
810 bufferList = (AudioBufferList *) malloc( dataSize );
811 if ( bufferList == NULL ) {
812 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
813 error( RtAudioError::WARNING );
817 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
818 if (result != noErr || dataSize == 0) {
820 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
821 errorText_ = errorStream_.str();
822 error( RtAudioError::WARNING );
826 // Get input channel information.
827 nStreams = bufferList->mNumberBuffers;
828 for ( i=0; i<nStreams; i++ )
829 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
832 // If device opens for both playback and capture, we determine the channels.
// Duplex channel count is the smaller of the two directions.
833 if ( info.outputChannels > 0 && info.inputChannels > 0 )
834 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
836 // Probe the device sample rates.
837 bool isInput = false;
838 if ( info.outputChannels == 0 ) isInput = true;
840 // Determine the supported sample rates.
841 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
842 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
843 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
844 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
845 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
846 errorText_ = errorStream_.str();
847 error( RtAudioError::WARNING );
851 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
852 AudioValueRange rangeList[ nRanges ];
853 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
854 if ( result != kAudioHardwareNoError ) {
855 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
856 errorText_ = errorStream_.str();
857 error( RtAudioError::WARNING );
861 // The sample rate reporting mechanism is a bit of a mystery. It
862 // seems that it can either return individual rates or a range of
863 // rates. I assume that if the min / max range values are the same,
864 // then that represents a single supported rate and if the min / max
865 // range values are different, the device supports an arbitrary
866 // range of values (though there might be multiple ranges, so we'll
867 // use the most conservative range).
868 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
869 bool haveValueRange = false;
870 info.sampleRates.clear();
871 for ( UInt32 i=0; i<nRanges; i++ ) {
872 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
// Discrete rate: record it, and track the preferred rate (highest
// discrete rate not exceeding 48 kHz, or the first rate seen).
873 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
874 info.sampleRates.push_back( tmpSr );
876 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
877 info.preferredSampleRate = tmpSr;
// Continuous range: shrink [minimumRate, maximumRate] to the most
// conservative intersection across all reported ranges.
880 haveValueRange = true;
881 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
882 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For a continuous range, offer every standard candidate rate that
// falls inside it (see the SAMPLE_RATES table near the top of file).
886 if ( haveValueRange ) {
887 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
888 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
889 info.sampleRates.push_back( SAMPLE_RATES[k] );
891 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
892 info.preferredSampleRate = SAMPLE_RATES[k];
897 // Sort and remove any redundant values
898 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
899 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
901 if ( info.sampleRates.size() == 0 ) {
902 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
903 errorText_ = errorStream_.str();
904 error( RtAudioError::WARNING );
908 // CoreAudio always uses 32-bit floating point data for PCM streams.
909 // Thus, any other "physical" formats supported by the device are of
910 // no interest to the client.
911 info.nativeFormats = RTAUDIO_FLOAT32;
// Flag this device if it is the system default for either direction.
913 if ( info.outputChannels > 0 )
914 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
915 if ( info.inputChannels > 0 )
916 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: recover the RtApiCore instance from the
// CallbackInfo user pointer and dispatch to its callbackEvent. Returning a
// non-noErr status tells CoreAudio the callback failed.
922 static OSStatus callbackHandler( AudioDeviceID inDevice,
923 const AudioTimeStamp* /*inNow*/,
924 const AudioBufferList* inInputData,
925 const AudioTimeStamp* /*inInputTime*/,
926 AudioBufferList* outOutputData,
927 const AudioTimeStamp* /*inOutputTime*/,
930 CallbackInfo *info = (CallbackInfo *) infoPointer;
932 RtApiCore *object = (RtApiCore *) info->object;
933 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
934 return kAudioHardwareUnspecifiedError;
936 return kAudioHardwareNoError;
// Property listener for processor-overload (xrun) notifications: set the
// appropriate flag in the CoreHandle — xrun[1] for the input scope,
// xrun[0] otherwise (output) — so the next callback can report the
// over/underrun to the client.
939 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
941 const AudioObjectPropertyAddress properties[],
942 void* handlePointer )
944 CoreHandle *handle = (CoreHandle *) handlePointer;
945 for ( UInt32 i=0; i<nAddresses; i++ ) {
946 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
947 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
948 handle->xrun[1] = true;
950 handle->xrun[0] = true;
954 return kAudioHardwareNoError;
// Property listener used while waiting for a sample-rate change to take
// effect: reads the device's current nominal sample rate into the Float64
// pointed to by 'ratePointer' so the caller can poll for the new value.
957 static OSStatus rateListener( AudioObjectID inDevice,
958 UInt32 /*nAddresses*/,
959 const AudioObjectPropertyAddress /*properties*/[],
962 Float64 *rate = (Float64 *) ratePointer;
963 UInt32 dataSize = sizeof( Float64 );
964 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
965 kAudioObjectPropertyScopeGlobal,
966 kAudioObjectPropertyElementMaster };
967 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
968 return kAudioHardwareNoError;
// Open one direction (OUTPUT or INPUT) of a stream on a CoreAudio device.
// Validates the device index, maps the requested channel range onto one or
// more CoreAudio streams, negotiates buffer size / sample rate / virtual and
// physical data formats, allocates user/device buffers, and installs the
// IOProc callback plus an overload (xrun) listener.
//
// Parameters:
//   device       - RtAudio device index (0 .. getDeviceCount()-1).
//   mode         - OUTPUT or INPUT; selects the CoreAudio property scope.
//   channels     - number of user channels requested.
//   firstChannel - first device channel to use (offset within the device).
//   sampleRate   - requested sample rate in Hz.
//   format       - user-side sample format; device side is fixed at FLOAT32.
//   bufferSize   - in/out: requested frames per buffer, clamped to the
//                  device-supported range and written back for the caller.
//   options      - optional stream options (flags etc.); may be NULL.
// Returns SUCCESS on success; on failure sets errorText_/errorStream_.
971 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
972 unsigned int firstChannel, unsigned int sampleRate,
973 RtAudioFormat format, unsigned int *bufferSize,
974 RtAudio::StreamOptions *options )
977 unsigned int nDevices = getDeviceCount();
978 if ( nDevices == 0 ) {
979 // This should not happen because a check is made before this function is called.
980 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
984 if ( device >= nDevices ) {
985 // This should not happen because a check is made before this function is called.
986 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Fetch the system's device list so the RtAudio index can be translated
// into an AudioDeviceID.
990 AudioDeviceID deviceList[ nDevices ];
991 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
992 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
993 kAudioObjectPropertyScopeGlobal,
994 kAudioObjectPropertyElementMaster };
995 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
996 0, NULL, &dataSize, (void *) &deviceList );
997 if ( result != noErr ) {
998 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
1002 AudioDeviceID id = deviceList[ device ];
1004 // Setup for stream mode.
1005 bool isInput = false;
1006 if ( mode == INPUT ) {
1008 property.mScope = kAudioDevicePropertyScopeInput;
1011 property.mScope = kAudioDevicePropertyScopeOutput;
1013 // Get the stream "configuration".
1014 AudioBufferList *bufferList = nil;
1016 property.mSelector = kAudioDevicePropertyStreamConfiguration;
1017 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
1018 if ( result != noErr || dataSize == 0 ) {
1019 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
1020 errorText_ = errorStream_.str();
1024 // Allocate the AudioBufferList.
// bufferList is freed on every exit path below (elsewhere in this function);
// it only describes the stream layout, it holds no sample data here.
1025 bufferList = (AudioBufferList *) malloc( dataSize );
1026 if ( bufferList == NULL ) {
1027 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1031 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1032 if (result != noErr || dataSize == 0) {
1034 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1035 errorText_ = errorStream_.str();
1039 // Search for one or more streams that contain the desired number of
1040 // channels. CoreAudio devices can have an arbitrary number of
1041 // streams and each stream can have an arbitrary number of channels.
1042 // For each stream, a single buffer of interleaved samples is
1043 // provided. RtAudio prefers the use of one stream of interleaved
1044 // data or multiple consecutive single-channel streams. However, we
1045 // now support multiple consecutive multi-channel streams of
1046 // interleaved data as well.
1047 UInt32 iStream, offsetCounter = firstChannel;
1048 UInt32 nStreams = bufferList->mNumberBuffers;
1049 bool monoMode = false;
1050 bool foundStream = false;
1052 // First check that the device supports the requested number of
// channels (summed across all of its streams).
1054 UInt32 deviceChannels = 0;
1055 for ( iStream=0; iStream<nStreams; iStream++ )
1056 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1058 if ( deviceChannels < ( channels + firstChannel ) ) {
1060 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1061 errorText_ = errorStream_.str();
1065 // Look for a single stream meeting our needs.
1066 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1067 for ( iStream=0; iStream<nStreams; iStream++ ) {
1068 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1069 if ( streamChannels >= channels + offsetCounter ) {
// This single stream can hold the whole requested channel range.
1070 firstStream = iStream;
1071 channelOffset = offsetCounter;
// Requested range begins inside this stream but does not fit: fall through
// to the multi-stream search below.
1075 if ( streamChannels > offsetCounter ) break;
1076 offsetCounter -= streamChannels;
1079 // If we didn't find a single stream above, then we should be able
1080 // to meet the channel specification with multiple streams.
1081 if ( foundStream == false ) {
// First skip whole streams consumed by the firstChannel offset.
1083 offsetCounter = firstChannel;
1084 for ( iStream=0; iStream<nStreams; iStream++ ) {
1085 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1086 if ( streamChannels > offsetCounter ) break;
1087 offsetCounter -= streamChannels;
1090 firstStream = iStream;
1091 channelOffset = offsetCounter;
1092 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode stays true only while every consumed stream is single-channel
// (mono-mode devices get non-interleaved handling later).
1094 if ( streamChannels > 1 ) monoMode = false;
1095 while ( channelCounter > 0 ) {
1096 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1097 if ( streamChannels > 1 ) monoMode = false;
1098 channelCounter -= streamChannels;
1105 // Determine the buffer size.
1106 AudioValueRange bufferRange;
1107 dataSize = sizeof( AudioValueRange );
1108 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1109 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1111 if ( result != noErr ) {
1112 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1113 errorText_ = errorStream_.str();
// Clamp the requested buffer size into the device-supported range;
// MINIMIZE_LATENCY forces the minimum supported size.
1117 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1118 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1119 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1121 // Set the buffer size. For multiple streams, I'm assuming we only
1122 // need to make this setting for the master channel.
1123 UInt32 theSize = (UInt32) *bufferSize;
1124 dataSize = sizeof( UInt32 );
1125 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1126 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1128 if ( result != noErr ) {
1129 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1130 errorText_ = errorStream_.str();
1134 // If attempting to setup a duplex stream, the bufferSize parameter
1135 // MUST be the same in both directions!
1136 *bufferSize = theSize;
1137 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1138 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1139 errorText_ = errorStream_.str();
1143 stream_.bufferSize = *bufferSize;
1144 stream_.nBuffers = 1;
1146 // Try to set "hog" mode ... it's not clear to me this is working.
1147 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1149 dataSize = sizeof( hog_pid );
1150 property.mSelector = kAudioDevicePropertyHogMode;
1151 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1152 if ( result != noErr ) {
1153 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1154 errorText_ = errorStream_.str();
// Only claim the device if some other process currently hogs it.
1158 if ( hog_pid != getpid() ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1163 errorText_ = errorStream_.str();
1169 // Check and if necessary, change the sample rate for the device.
1170 Float64 nominalRate;
1171 dataSize = sizeof( Float64 );
1172 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1173 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1174 if ( result != noErr ) {
1175 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1176 errorText_ = errorStream_.str();
1180 // Only change the sample rate if off by more than 1 Hz.
1181 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1183 // Set a property listener for the sample rate change
// rateListener (registered below) writes the device's new nominal rate
// into reportedRate, which the wait loop below polls.
1184 Float64 reportedRate = 0.0;
1185 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1186 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1187 if ( result != noErr ) {
1188 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1189 errorText_ = errorStream_.str();
1193 nominalRate = (Float64) sampleRate;
1194 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1195 if ( result != noErr ) {
1196 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1202 // Now wait until the reported nominal rate is what we just set.
// Polls for up to ~5 seconds in 5 ms steps.
// NOTE(review): no sleep call is visible between counter increments here —
// presumably a usleep( 5000 ) accompanies each iteration; confirm, else
// this is a hot spin that reaches the timeout almost instantly.
1203 UInt32 microCounter = 0;
1204 while ( reportedRate != nominalRate ) {
1205 microCounter += 5000;
1206 if ( microCounter > 5000000 ) break;
1210 // Remove the property listener.
1211 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1213 if ( microCounter > 5000000 ) {
1214 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1215 errorText_ = errorStream_.str();
1220 // Now set the stream format for all streams. Also, check the
1221 // physical format of the device and change that if necessary.
1222 AudioStreamBasicDescription description;
1223 dataSize = sizeof( AudioStreamBasicDescription );
1224 property.mSelector = kAudioStreamPropertyVirtualFormat;
1225 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1226 if ( result != noErr ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1232 // Set the sample rate and data format id. However, only make the
1233 // change if the sample rate is not within 1.0 of the desired
1234 // rate and the format is not linear pcm.
1235 bool updateFormat = false;
1236 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1237 description.mSampleRate = (Float64) sampleRate;
1238 updateFormat = true;
1241 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1242 description.mFormatID = kAudioFormatLinearPCM;
1243 updateFormat = true;
1246 if ( updateFormat ) {
1247 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1248 if ( result != noErr ) {
1249 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1250 errorText_ = errorStream_.str();
1255 // Now check the physical format.
1256 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1257 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1258 if ( result != noErr ) {
1259 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1260 errorText_ = errorStream_.str();
1264 //std::cout << "Current physical stream format:" << std::endl;
1265 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1266 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1267 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1268 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// If the physical format is not linear PCM at >= 16 bits, probe a list of
// candidate formats from best to worst until one is accepted.
1270 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1271 description.mFormatID = kAudioFormatLinearPCM;
1272 //description.mSampleRate = (Float64) sampleRate;
1273 AudioStreamBasicDescription testDescription = description;
1276 // We'll try higher bit rates first and then work our way down.
// Candidate list: <bit depth, format flags>. The fractional bit depths
// (24.2, 24.4) tag the two 24-bit-in-4-bytes layouts (aligned low/high).
// NOTE(review): the vector's element type here is std::pair<UInt32, UInt32>
// while every push_back constructs std::pair<Float32, UInt32>; the implicit
// conversion truncates 24.2/24.4 to 24, making the three 24-bit variants
// indistinguishable by .first. The element type should presumably be
// std::pair<Float32, UInt32> — confirm against the canonical sources.
1277 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1278 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1279 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1280 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1281 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1282 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1283 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1284 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1285 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1286 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1287 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1288 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1289 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1291 bool setPhysicalFormat = false;
1292 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1293 testDescription = description;
1294 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1295 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' here is bitwise NOT on the flag test, which is nonzero
// (i.e. "true") for almost any operand — '!' (logical NOT: "not packed")
// looks like the intent. Confirm; as written, every 24-bit candidate takes
// the 4-bytes-per-sample branch.
1296 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1297 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1299 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1300 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1301 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1302 if ( result == noErr ) {
1303 setPhysicalFormat = true;
1304 //std::cout << "Updated physical stream format:" << std::endl;
1305 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1306 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1307 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1308 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1313 if ( !setPhysicalFormat ) {
1314 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1315 errorText_ = errorStream_.str();
1318 } // done setting virtual/physical formats.
1320 // Get the stream / device latency.
1322 dataSize = sizeof( UInt32 );
1323 property.mSelector = kAudioDevicePropertyLatency;
1324 if ( AudioObjectHasProperty( id, &property ) == true ) {
1325 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1326 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// Latency is informational only, so failure is a warning, not fatal.
1328 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1329 errorText_ = errorStream_.str();
1330 error( RtAudioError::WARNING );
1334 // Byte-swapping: According to AudioHardware.h, the stream data will
1335 // always be presented in native-endian format, so we should never
1336 // need to byte swap.
1337 stream_.doByteSwap[mode] = false;
1339 // From the CoreAudio documentation, PCM data must be supplied as
// 32-bit floats at the virtual (stream) level.
1341 stream_.userFormat = format;
1342 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1344 if ( streamCount == 1 )
1345 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1346 else // multiple streams
1347 stream_.nDeviceChannels[mode] = channels;
1348 stream_.nUserChannels[mode] = channels;
1349 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1350 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1351 else stream_.userInterleaved = true;
1352 stream_.deviceInterleaved[mode] = true;
1353 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1355 // Set flags for buffer conversion.
1356 stream_.doConvertBuffer[mode] = false;
1357 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1358 stream_.doConvertBuffer[mode] = true;
1359 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1360 stream_.doConvertBuffer[mode] = true;
1361 if ( streamCount == 1 ) {
1362 if ( stream_.nUserChannels[mode] > 1 &&
1363 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1364 stream_.doConvertBuffer[mode] = true;
1366 else if ( monoMode && stream_.userInterleaved )
1367 stream_.doConvertBuffer[mode] = true;
1369 // Allocate our CoreHandle structure for the stream.
1370 CoreHandle *handle = 0;
1371 if ( stream_.apiHandle == 0 ) {
1373 handle = new CoreHandle;
1375 catch ( std::bad_alloc& ) {
1376 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1380 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1381 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1384 stream_.apiHandle = (void *) handle;
// A handle already exists (duplex second pass): reuse it.
1387 handle = (CoreHandle *) stream_.apiHandle;
1388 handle->iStream[mode] = firstStream;
1389 handle->nStreams[mode] = streamCount;
1390 handle->id[mode] = id;
1392 // Allocate necessary internal buffers.
1393 unsigned long bufferBytes;
1394 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1395 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): memset runs before the NULL check below — if malloc fails
// this dereferences NULL. Check first (or use the commented-out calloc).
1396 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1397 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1398 if ( stream_.userBuffer[mode] == NULL ) {
1399 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1403 // If possible, we will make use of the CoreAudio stream buffers as
1404 // "device buffers".  However, we can't do this if using multiple
// streams, in which case an intermediate interleaved buffer is needed.
1406 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1408 bool makeBuffer = true;
1409 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1410 if ( mode == INPUT ) {
// For duplex, reuse the existing output device buffer if it is big enough.
1411 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1412 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1413 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1418 bufferBytes *= *bufferSize;
1419 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1420 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1421 if ( stream_.deviceBuffer == NULL ) {
1422 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1428 stream_.sampleRate = sampleRate;
1429 stream_.device[mode] = device;
1430 stream_.state = STREAM_STOPPED;
1431 stream_.callbackInfo.object = (void *) this;
1433 // Setup the buffer conversion information structure.
1434 if ( stream_.doConvertBuffer[mode] ) {
1435 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1436 else setConvertInfo( mode, channelOffset );
1439 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1440 // Only one callback procedure per device.
1441 stream_.mode = DUPLEX;
1443 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1444 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1446 // deprecated in favor of AudioDeviceCreateIOProcID()
1447 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1449 if ( result != noErr ) {
1450 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1451 errorText_ = errorStream_.str();
1454 if ( stream_.mode == OUTPUT && mode == INPUT )
1455 stream_.mode = DUPLEX;
1457 stream_.mode = mode;
1460 // Setup the device property listener for over/underload.
1461 property.mSelector = kAudioDeviceProcessorOverload;
1462 property.mScope = kAudioObjectPropertyScopeGlobal;
1463 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// ---- shared error-cleanup path: release everything allocated above ----
1469 pthread_cond_destroy( &handle->condition );
1471 stream_.apiHandle = 0;
1474 for ( int i=0; i<2; i++ ) {
1475 if ( stream_.userBuffer[i] ) {
1476 free( stream_.userBuffer[i] );
1477 stream_.userBuffer[i] = 0;
1481 if ( stream_.deviceBuffer ) {
1482 free( stream_.deviceBuffer );
1483 stream_.deviceBuffer = 0;
1486 stream_.state = STREAM_CLOSED;
// Close the open stream: remove the xrun (overload) listeners, stop the
// device(s) if still running, destroy/remove the IOProcs, free the user and
// device buffers and the CoreHandle, and mark the stream CLOSED.
// A missing open stream is reported as a WARNING, not an error.
1490 void RtApiCore :: closeStream( void )
1492 if ( stream_.state == STREAM_CLOSED ) {
1493 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1494 error( RtAudioError::WARNING );
1498 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// ---- output side (handle->id[0]) ----
1499 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// The initializer value of 'property' is irrelevant; selector and scope are
// overwritten immediately below.
1501 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1502 kAudioObjectPropertyScopeGlobal,
1503 kAudioObjectPropertyElementMaster };
1505 property.mSelector = kAudioDeviceProcessorOverload;
1506 property.mScope = kAudioObjectPropertyScopeGlobal;
1507 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1508 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1509 error( RtAudioError::WARNING );
1512 if ( stream_.state == STREAM_RUNNING )
1513 AudioDeviceStop( handle->id[0], callbackHandler );
1514 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1515 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1517 // deprecated in favor of AudioDeviceDestroyIOProcID()
1518 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// ---- input side (handle->id[1]); skipped when duplex shares one device ----
1522 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1524 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1525 kAudioObjectPropertyScopeGlobal,
1526 kAudioObjectPropertyElementMaster };
1528 property.mSelector = kAudioDeviceProcessorOverload;
1529 property.mScope = kAudioObjectPropertyScopeGlobal;
1530 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1531 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1532 error( RtAudioError::WARNING );
1535 if ( stream_.state == STREAM_RUNNING )
1536 AudioDeviceStop( handle->id[1], callbackHandler );
1537 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1538 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1540 // deprecated in favor of AudioDeviceDestroyIOProcID()
1541 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// ---- release per-stream allocations ----
1545 for ( int i=0; i<2; i++ ) {
1546 if ( stream_.userBuffer[i] ) {
1547 free( stream_.userBuffer[i] );
1548 stream_.userBuffer[i] = 0;
1552 if ( stream_.deviceBuffer ) {
1553 free( stream_.deviceBuffer );
1554 stream_.deviceBuffer = 0;
1557 // Destroy pthread condition variable.
1558 pthread_cond_destroy( &handle->condition );
1560 stream_.apiHandle = 0;
1562 stream_.mode = UNINITIALIZED;
1563 stream_.state = STREAM_CLOSED;
// Start the callback procedure on the output device and, if the input side
// uses a different device, on the input device as well. Resets the drain
// bookkeeping and marks the stream RUNNING. Starting an already-running
// stream is a WARNING; an AudioDeviceStart failure is a SYSTEM_ERROR.
1566 void RtApiCore :: startStream( void )
1569 if ( stream_.state == STREAM_RUNNING ) {
1570 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1571 error( RtAudioError::WARNING );
1575 OSStatus result = noErr;
1576 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1577 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1579 result = AudioDeviceStart( handle->id[0], callbackHandler );
1580 if ( result != noErr ) {
1581 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1582 errorText_ = errorStream_.str();
// Single-device duplex needs only the one start above.
1587 if ( stream_.mode == INPUT ||
1588 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1590 result = AudioDeviceStart( handle->id[1], callbackHandler );
1591 if ( result != noErr ) {
1592 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1593 errorText_ = errorStream_.str();
// Fresh run: no drain requested yet.
1598 handle->drainCounter = 0;
1599 handle->internalDrain = false;
1600 stream_.state = STREAM_RUNNING;
1603 if ( result == noErr ) return;
1604 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after draining the output: sets drainCounter so the
// callback writes silence, waits on the condition variable until the
// callback signals the drain is complete, then stops the device(s).
// Stopping an already-stopped stream is a WARNING.
1607 void RtApiCore :: stopStream( void )
1610 if ( stream_.state == STREAM_STOPPED ) {
1611 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1612 error( RtAudioError::WARNING );
1616 OSStatus result = noErr;
1617 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1618 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress yet: request one (2 =
// external stopStream() call; see callbackEvent) and block until the
// callback signals handle->condition.
// NOTE(review): stream_.mutex is not visibly locked in this excerpt before
// pthread_cond_wait — POSIX requires the mutex be held by the caller;
// confirm the locking protocol around this wait.
1620 if ( handle->drainCounter == 0 ) {
1621 handle->drainCounter = 2;
1622 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1625 result = AudioDeviceStop( handle->id[0], callbackHandler );
1626 if ( result != noErr ) {
1627 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1628 errorText_ = errorStream_.str();
// Input side only needs its own stop when it is a separate device.
1633 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1635 result = AudioDeviceStop( handle->id[1], callbackHandler );
1636 if ( result != noErr ) {
1637 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1638 errorText_ = errorStream_.str();
1643 stream_.state = STREAM_STOPPED;
1646 if ( result == noErr ) return;
1647 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: skip the output drain by pre-setting drainCounter to 2
// (the "external stop requested" value checked in callbackEvent), so the
// subsequent stop does not wait for queued audio to play out.
1650 void RtApiCore :: abortStream( void )
1653 if ( stream_.state == STREAM_STOPPED ) {
1654 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1655 error( RtAudioError::WARNING );
1659 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1660 handle->drainCounter = 2;
1665 // This function will be called by a spawned thread when the user
1666 // callback function signals that the stream should be stopped or
1667 // aborted.  It is better to handle it this way because the
1668 // callbackEvent() function probably should return before the AudioDeviceStop()
1669 // function is called.
// Thread entry point: ptr is the stream's CallbackInfo; its 'object' member
// is the owning RtApiCore instance, whose stopStream() is invoked off the
// realtime callback thread.
1670 static void *coreStopStream( void *ptr )
1672 CallbackInfo *info = (CallbackInfo *) ptr;
1673 RtApiCore *object = (RtApiCore *) info->object;
1675 object->stopStream();
1676 pthread_exit( NULL );
// Per-buffer IOProc handler. Invokes the user callback for fresh output
// data, copies/converts between the user buffers and the CoreAudio stream
// buffers for both directions, and manages the drain/stop protocol:
//   drainCounter == 0 : normal operation
//   drainCounter == 1 : internal drain (callback returned 1)
//   drainCounter == 2 : stop requested (callback returned 2, or stop/abort)
//   drainCounter  > 3 : drain finished -> signal/spawn the actual stop
// Returns SUCCESS; may be called once per device in dual-device duplex mode.
1679 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1680 const AudioBufferList *inBufferList,
1681 const AudioBufferList *outBufferList )
1683 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1684 if ( stream_.state == STREAM_CLOSED ) {
1685 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1686 error( RtAudioError::WARNING );
1690 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1691 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1693 // Check if we were draining the stream and signal is finished.
1694 if ( handle->drainCounter > 3 ) {
1695 ThreadHandle threadId;
1697 stream_.state = STREAM_STOPPING;
1698 if ( handle->internalDrain == true )
// Internal drain: callbackEvent must not call stopStream() itself, so a
// helper thread (coreStopStream) performs the stop.
1699 pthread_create( &threadId, NULL, coreStopStream, info );
1700 else // external call to stopStream()
1701 pthread_cond_signal( &handle->condition );
1705 AudioDeviceID outputDevice = handle->id[0];
1707 // Invoke user callback to get fresh output data UNLESS we are
1708 // draining stream or duplex mode AND the input/output devices are
1709 // different AND this function is called for the input device.
1710 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1711 RtAudioCallback callback = (RtAudioCallback) info->callback;
1712 double streamTime = getStreamTime();
1713 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags set by the overload listener.
1714 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1715 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1716 handle->xrun[0] = false;
1718 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1719 status |= RTAUDIO_INPUT_OVERFLOW;
1720 handle->xrun[1] = false;
1723 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1724 stream_.bufferSize, streamTime, status, info->userData );
// Callback return protocol: 2 = abort immediately, 1 = drain then stop.
1725 if ( cbReturnValue == 2 ) {
1726 stream_.state = STREAM_STOPPING;
1727 handle->drainCounter = 2;
1731 else if ( cbReturnValue == 1 ) {
1732 handle->drainCounter = 1;
1733 handle->internalDrain = true;
// ---------------- output side ----------------
1737 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1739 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1741 if ( handle->nStreams[0] == 1 ) {
1742 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1744 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1746 else { // fill multiple streams with zeros
1747 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1748 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1750 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1754 else if ( handle->nStreams[0] == 1 ) {
1755 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1756 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1757 stream_.userBuffer[0], stream_.convertInfo[0] );
1759 else { // copy from user buffer
1760 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1761 stream_.userBuffer[0],
1762 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1765 else { // fill multiple streams
1766 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1767 if ( stream_.doConvertBuffer[0] ) {
1768 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1769 inBuffer = (Float32 *) stream_.deviceBuffer;
1772 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
// One single-channel stream per user channel; copy each channel's block.
1773 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1774 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1775 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1776 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1779 else { // fill multiple multi-channel streams with interleaved data
1780 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1783 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1784 UInt32 inChannels = stream_.nUserChannels[0];
1785 if ( stream_.doConvertBuffer[0] ) {
1786 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1787 inChannels = stream_.nDeviceChannels[0];
// inOffset: distance between consecutive samples of one channel in the
// source (1 when interleaved, a full channel block otherwise).
1790 if ( inInterleaved ) inOffset = 1;
1791 else inOffset = stream_.bufferSize;
1793 channelsLeft = inChannels;
1794 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1796 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1797 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1800 // Account for possible channel offset in first stream
1801 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1802 streamChannels -= stream_.channelOffset[0];
1803 outJump = stream_.channelOffset[0];
1807 // Account for possible unfilled channels at end of the last stream
1808 if ( streamChannels > channelsLeft ) {
1809 outJump = streamChannels - channelsLeft;
1810 streamChannels = channelsLeft;
1813 // Determine input buffer offsets and skips
1814 if ( inInterleaved ) {
1815 inJump = inChannels;
1816 in += inChannels - channelsLeft;
1820 in += (inChannels - channelsLeft) * inOffset;
// Frame loop; the inner 'i' intentionally shadows the outer stream index.
1823 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1824 for ( unsigned int j=0; j<streamChannels; j++ ) {
1825 *out++ = in[j*inOffset];
1830 channelsLeft -= streamChannels;
// ---------------- input side ----------------
1836 // Don't bother draining input
1837 if ( handle->drainCounter ) {
1838 handle->drainCounter++;
1842 AudioDeviceID inputDevice;
1843 inputDevice = handle->id[1];
1844 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1846 if ( handle->nStreams[1] == 1 ) {
1847 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1848 convertBuffer( stream_.userBuffer[1],
1849 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1850 stream_.convertInfo[1] );
1852 else { // copy to user buffer
1853 memcpy( stream_.userBuffer[1],
1854 inBufferList->mBuffers[handle->iStream[1]].mData,
1855 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1858 else { // read from multiple streams
1859 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1860 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1862 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1863 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1864 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1865 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1866 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1869 else { // read from multiple multi-channel streams
// Mirror of the output de-interleaving logic above, direction reversed.
1870 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1873 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1874 UInt32 outChannels = stream_.nUserChannels[1];
1875 if ( stream_.doConvertBuffer[1] ) {
1876 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1877 outChannels = stream_.nDeviceChannels[1];
1880 if ( outInterleaved ) outOffset = 1;
1881 else outOffset = stream_.bufferSize;
1883 channelsLeft = outChannels;
1884 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1886 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1887 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1890 // Account for possible channel offset in first stream
1891 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1892 streamChannels -= stream_.channelOffset[1];
1893 inJump = stream_.channelOffset[1];
1897 // Account for possible unread channels at end of the last stream
1898 if ( streamChannels > channelsLeft ) {
1899 inJump = streamChannels - channelsLeft;
1900 streamChannels = channelsLeft;
1903 // Determine output buffer offsets and skips
1904 if ( outInterleaved ) {
1905 outJump = outChannels;
1906 out += outChannels - channelsLeft;
1910 out += (outChannels - channelsLeft) * outOffset;
1913 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1914 for ( unsigned int j=0; j<streamChannels; j++ ) {
1915 out[j*outOffset] = *in++;
1920 channelsLeft -= streamChannels;
1924 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1925 convertBuffer( stream_.userBuffer[1],
1926 stream_.deviceBuffer,
1927 stream_.convertInfo[1] );
1933 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream's running time by one buffer.
1935 RtApi::tickStreamTime();
// Map a CoreAudio OSStatus error code to the constant's name as a static,
// human-readable C string. Unknown codes fall through to a generic message.
1939 const char* RtApiCore :: getErrorCode( OSStatus code )
1943   case kAudioHardwareNotRunningError:
1944     return "kAudioHardwareNotRunningError";
1946   case kAudioHardwareUnspecifiedError:
1947     return "kAudioHardwareUnspecifiedError";
1949   case kAudioHardwareUnknownPropertyError:
1950     return "kAudioHardwareUnknownPropertyError";
1952   case kAudioHardwareBadPropertySizeError:
1953     return "kAudioHardwareBadPropertySizeError";
1955   case kAudioHardwareIllegalOperationError:
1956     return "kAudioHardwareIllegalOperationError";
1958   case kAudioHardwareBadObjectError:
1959     return "kAudioHardwareBadObjectError";
1961   case kAudioHardwareBadDeviceError:
1962     return "kAudioHardwareBadDeviceError";
1964   case kAudioHardwareBadStreamError:
1965     return "kAudioHardwareBadStreamError";
1967   case kAudioHardwareUnsupportedOperationError:
1968     return "kAudioHardwareUnsupportedOperationError";
1970   case kAudioDeviceUnsupportedFormatError:
1971     return "kAudioDeviceUnsupportedFormatError";
1973   case kAudioDevicePermissionsError:
1974     return "kAudioDevicePermissionsError";
// Default case: code not recognized.
1977     return "CoreAudio unknown error";
1981 //******************** End of __MACOSX_CORE__ *********************//
1984 #if defined(__UNIX_JACK__)
1986 // JACK is a low-latency audio server, originally written for the
1987 // GNU/Linux operating system and now also ported to OS-X. It can
1988 // connect a number of different applications to an audio device, as
1989 // well as allowing them to share audio between themselves.
1991 // When using JACK with RtAudio, "devices" refer to JACK clients that
1992 // have ports connected to the server. The JACK server is typically
1993 // started in a terminal as follows:
1995 // .jackd -d alsa -d hw:0
1997 // or through an interface program such as qjackctl. Many of the
1998 // parameters normally set for a stream are fixed by the JACK server
1999 // and can be specified when the JACK server is started. In
2002 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2004 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2005 // frames, and number of buffers = 4. Once the server is running, it
2006 // is not possible to override these values. If the values are not
2007 // specified in the command-line, the JACK server uses default values.
2009 // The JACK server does not have to be running when an instance of
2010 // RtApiJack is created, though the function getDeviceCount() will
2011 // report 0 devices found until JACK has been started. When no
2012 // devices are available (i.e., the JACK server is not running), a
2013 // stream cannot be opened.
2015 #include <jack/jack.h>
2019 // A structure to hold various information related to the Jack API
// NOTE(review): the struct declaration line itself is not visible in this
// view. Indices [0]/[1] below appear to correspond to playback/capture
// respectively (see port registration in probeDeviceOpen) — TODO confirm.
2022   jack_client_t *client;
2023   jack_port_t **ports[2];      // registered JACK port handles, one array per mode
2024   std::string deviceName[2];   // JACK client (device) name per mode
2026   pthread_cond_t condition;    // signaled by the callback when draining completes
2027   int drainCounter; // Tracks callback counts when draining
2028   bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default-initialize: null client/ports, clear drain state and xrun flags.
2031     :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2034 #if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error handler, installed in the RtApiJack constructor to
// silence JACK's internal error reporting in non-debug builds.
2035 static void jackSilentError( const char * ) {};
// Constructor: autoconnect is enabled by default; in release builds JACK's
// internal error reporting is suppressed via jackSilentError.
2038 RtApiJack :: RtApiJack()
2039   :shouldAutoconnect_(true) {
2040   // Nothing to do here.
2041 #if !defined(__RTAUDIO_DEBUG__)
2042   // Turn off Jack's internal error reporting.
2043   jack_set_error_function( &jackSilentError );
// Destructor: ensure any open stream is torn down before the object dies.
2047 RtApiJack :: ~RtApiJack()
2049   if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices": a device is a distinct port-name prefix (the text up
// to the first ':') among all ports registered with the server. Returns 0 if
// a JACK server cannot be contacted.
2052 unsigned int RtApiJack :: getDeviceCount( void )
2054   // See if we can become a jack client.
2055   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2056   jack_status_t *status = NULL;
2057   jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2058   if ( client == 0 ) return 0;
2061   std::string port, previousPort;
2062   unsigned int nChannels = 0, nDevices = 0;
2063   ports = jack_get_ports( client, NULL, NULL, 0 );
2065     // Parse the port names up to the first colon (:).
2068       port = (char *) ports[ nChannels ];
2069       iColon = port.find(":");
2070       if ( iColon != std::string::npos ) {
// Keep the colon so prefixes compare including the separator.
2071         port = port.substr( 0, iColon + 1 );
// Count each new (previously unseen) prefix as a device.
2072         if ( port != previousPort ) {
2074           previousPort = port;
2077     } while ( ports[++nChannels] );
// Done probing; release the temporary client.
2081   jack_client_close( client );
// Probe one JACK "device" (port-name prefix) and fill in a DeviceInfo:
// name, channel counts, sample rate (fixed by the running server), and
// native format (always 32-bit float for JACK).
2085 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2087   RtAudio::DeviceInfo info;
2088   info.probed = false;
2090   jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2091   jack_status_t *status = NULL;
2092   jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2093   if ( client == 0 ) {
2094     errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2095     error( RtAudioError::WARNING );
2100   std::string port, previousPort;
2101   unsigned int nPorts = 0, nDevices = 0;
2102   ports = jack_get_ports( client, NULL, NULL, 0 );
2104     // Parse the port names up to the first colon (:).
2107       port = (char *) ports[ nPorts ];
2108       iColon = port.find(":");
2109       if ( iColon != std::string::npos ) {
2110         port = port.substr( 0, iColon );
2111         if ( port != previousPort ) {
// The Nth distinct prefix is device N; remember its name when it matches.
2112           if ( nDevices == device ) info.name = port;
2114           previousPort = port;
2117     } while ( ports[++nPorts] );
2121   if ( device >= nDevices ) {
2122     jack_client_close( client );
2123     errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2124     error( RtAudioError::INVALID_USE );
2128   // Get the current jack server sample rate.
2129   info.sampleRates.clear();
// JACK fixes the rate server-wide, so it is the only supported rate.
2131   info.preferredSampleRate = jack_get_sample_rate( client );
2132   info.sampleRates.push_back( info.preferredSampleRate );
2134   // Count the available ports containing the client name as device
2135   // channels.  Jack "input ports" equal RtAudio output channels.
2136   unsigned int nChannels = 0;
2137   ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2139     while ( ports[ nChannels ] ) nChannels++;
2141     info.outputChannels = nChannels;
2144   // Jack "output ports" equal RtAudio input channels.
2146   ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2148     while ( ports[ nChannels ] ) nChannels++;
2150     info.inputChannels = nChannels;
2153   if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2154     jack_client_close(client);
2155     errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2156     error( RtAudioError::WARNING );
2160   // If device opens for both playback and capture, we determine the channels.
2161   if ( info.outputChannels > 0 && info.inputChannels > 0 )
2162     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2164   // Jack always uses 32-bit floats.
2165   info.nativeFormats = RTAUDIO_FLOAT32;
2167   // Jack doesn't provide default devices so we'll use the first available one.
2168   if ( device == 0 && info.outputChannels > 0 )
2169     info.isDefaultOutput = true;
2170   if ( device == 0 && info.inputChannels > 0 )
2171     info.isDefaultInput = true;
2173   jack_client_close(client);
// JACK process callback trampoline: forward the period to the RtApiJack
// instance stored in the CallbackInfo. A nonzero return tells JACK to
// remove this client from the process graph.
2178 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2180   CallbackInfo *info = (CallbackInfo *) infoPointer;
2182   RtApiJack *object = (RtApiJack *) info->object;
2183   if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2188 // This function will be called by a spawned thread when the Jack
2189 // server signals that it is shutting down.  It is necessary to handle
2190 // it this way because the jackShutdown() function must return before
2191 // the jack_deactivate() function (in closeStream()) will return.
2192 static void *jackCloseStream( void *ptr )
2194   CallbackInfo *info = (CallbackInfo *) ptr;
2195   RtApiJack *object = (RtApiJack *) info->object;
2197   object->closeStream();
2199   pthread_exit( NULL );
// JACK shutdown callback: if the stream is still running, the server went
// away unexpectedly — close the stream from a detached helper thread (see
// jackCloseStream for why this cannot be done inline).
2201 static void jackShutdown( void *infoPointer )
2203   CallbackInfo *info = (CallbackInfo *) infoPointer;
2204   RtApiJack *object = (RtApiJack *) info->object;
2206   // Check current stream state.  If stopped, then we'll assume this
2207   // was called as a result of a call to RtApiJack::stopStream (the
2208   // deactivation of a client handle causes this function to be called).
2209   // If not, we'll assume the Jack server is shutting down or some
2210   // other problem occurred and we should close the stream.
2211   if ( object->isStreamRunning() == false ) return;
2213   ThreadHandle threadId;
2214   pthread_create( &threadId, NULL, jackCloseStream, info );
2215   std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: record an over/underflow on whichever directions
// have registered ports, so the next user callback can report it.
2218 static int jackXrun( void *infoPointer )
2220   JackHandle *handle = *((JackHandle **) infoPointer);
2222   if ( handle->ports[0] ) handle->xrun[0] = true;
2223   if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (mode) of a JACK stream: connect to the server, locate
// the named device (port-name prefix), validate channel count and sample
// rate against the running server, allocate buffers and the JackHandle, and
// register our ports. Returns true on success; on failure, sets errorText_
// and releases anything allocated so far (see the error-unwind lines at the
// end). The external interface must not change — this is called once per
// mode, possibly twice for DUPLEX.
2228 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2229                                    unsigned int firstChannel, unsigned int sampleRate,
2230                                    RtAudioFormat format, unsigned int *bufferSize,
2231                                    RtAudio::StreamOptions *options )
2233   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2235   // Look for jack server and try to become a client (only do once per stream).
2236   jack_client_t *client = 0;
2237   if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2238     jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2239     jack_status_t *status = NULL;
2240     if ( options && !options->streamName.empty() )
2241       client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2243       client = jack_client_open( "RtApiJack", jackoptions, status );
2244     if ( client == 0 ) {
2245       errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2246       error( RtAudioError::WARNING );
2251     // The handle must have been created on an earlier pass.
2252     client = handle->client;
2256   std::string port, previousPort, deviceName;
2257   unsigned int nPorts = 0, nDevices = 0;
2258   ports = jack_get_ports( client, NULL, NULL, 0 );
2260     // Parse the port names up to the first colon (:).
2263       port = (char *) ports[ nPorts ];
2264       iColon = port.find(":");
2265       if ( iColon != std::string::npos ) {
2266         port = port.substr( 0, iColon );
2267         if ( port != previousPort ) {
// Same prefix-counting scheme as getDeviceCount/getDeviceInfo.
2268           if ( nDevices == device ) deviceName = port;
2270           previousPort = port;
2273     } while ( ports[++nPorts] );
2277   if ( device >= nDevices ) {
2278     errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2282   // Count the available ports containing the client name as device
2283   // channels.  Jack "input ports" equal RtAudio output channels.
2284   unsigned int nChannels = 0;
2285   unsigned long flag = JackPortIsInput;
2286   if ( mode == INPUT ) flag = JackPortIsOutput;
2287   ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2289     while ( ports[ nChannels ] ) nChannels++;
2293   // Compare the jack ports for specified client to the requested number of channels.
2294   if ( nChannels < (channels + firstChannel) ) {
2295     errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2296     errorText_ = errorStream_.str();
2300   // Check the jack server sample rate.
2301   unsigned int jackRate = jack_get_sample_rate( client );
// The server's rate cannot be overridden; a mismatch is fatal.
2302   if ( sampleRate != jackRate ) {
2303     jack_client_close( client );
2304     errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2305     errorText_ = errorStream_.str();
2308   stream_.sampleRate = jackRate;
2310   // Get the latency of the JACK port.
2311   ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2312   if ( ports[ firstChannel ] ) {
2314     jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2315     // the range (usually the min and max are equal)
2316     jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2317     // get the latency range
2318     jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2319     // be optimistic, use the min!
2320     stream_.latency[mode] = latrange.min;
2321     //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2325   // The jack server always uses 32-bit floating-point data.
2326   stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2327   stream_.userFormat = format;
2329   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2330   else stream_.userInterleaved = true;
2332   // Jack always uses non-interleaved buffers.
2333   stream_.deviceInterleaved[mode] = false;
2335   // Jack always provides host byte-ordered data.
2336   stream_.doByteSwap[mode] = false;
2338   // Get the buffer size.  The buffer size and number of buffers
2339   // (periods) is set when the jack server is started.
2340   stream_.bufferSize = (int) jack_get_buffer_size( client );
2341   *bufferSize = stream_.bufferSize;
2343   stream_.nDeviceChannels[mode] = channels;
2344   stream_.nUserChannels[mode] = channels;
2346   // Set flags for buffer conversion.
2347   stream_.doConvertBuffer[mode] = false;
2348   if ( stream_.userFormat != stream_.deviceFormat[mode] )
2349     stream_.doConvertBuffer[mode] = true;
2350   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2351        stream_.nUserChannels[mode] > 1 )
2352     stream_.doConvertBuffer[mode] = true;
2354   // Allocate our JackHandle structure for the stream.
2355   if ( handle == 0 ) {
2357       handle = new JackHandle;
2359     catch ( std::bad_alloc& ) {
2360       errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2364     if ( pthread_cond_init(&handle->condition, NULL) ) {
2365       errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2368     stream_.apiHandle = (void *) handle;
2369     handle->client = client;
2371   handle->deviceName[mode] = deviceName;
2373   // Allocate necessary internal buffers.
2374   unsigned long bufferBytes;
2375   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2376   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2377   if ( stream_.userBuffer[mode] == NULL ) {
2378     errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2382   if ( stream_.doConvertBuffer[mode] ) {
2384     bool makeBuffer = true;
2385     if ( mode == OUTPUT )
2386       bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2387     else { // mode == INPUT
2388       bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
// In DUPLEX, reuse the existing (larger) device buffer if it already fits.
2389       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2390         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2391         if ( bufferBytes < bytesOut ) makeBuffer = false;
2396       bufferBytes *= *bufferSize;
2397       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2398       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2399       if ( stream_.deviceBuffer == NULL ) {
2400         errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2406   // Allocate memory for the Jack ports (channels) identifiers.
2407   handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2408   if ( handle->ports[mode] == NULL )  {
2409     errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2413   stream_.device[mode] = device;
2414   stream_.channelOffset[mode] = firstChannel;
2415   stream_.state = STREAM_STOPPED;
2416   stream_.callbackInfo.object = (void *) this;
2418   if ( stream_.mode == OUTPUT && mode == INPUT )
2419     // We had already set up the stream for output.
2420     stream_.mode = DUPLEX;
2422     stream_.mode = mode;
// Callbacks are (re)installed on the first pass for this stream.
2423     jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2424     jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2425     jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2428   // Register our ports.
2430   if ( mode == OUTPUT ) {
2431     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2432       snprintf( label, 64, "outport %d", i );
2433       handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2434                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2438     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2439       snprintf( label, 64, "inport %d", i );
2440       handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2441                                                 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2445   // Setup the buffer conversion information structure.  We don't use
2446   // buffers to do channel offsets, so we override that parameter
2448   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2450   if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error-unwind path: release everything allocated above before returning.
2456     pthread_cond_destroy( &handle->condition );
2457     jack_client_close( handle->client );
2459     if ( handle->ports[0] ) free( handle->ports[0] );
2460     if ( handle->ports[1] ) free( handle->ports[1] );
2463     stream_.apiHandle = 0;
2466   for ( int i=0; i<2; i++ ) {
2467     if ( stream_.userBuffer[i] ) {
2468       free( stream_.userBuffer[i] );
2469       stream_.userBuffer[i] = 0;
2473   if ( stream_.deviceBuffer ) {
2474     free( stream_.deviceBuffer );
2475     stream_.deviceBuffer = 0;
// Close the stream: deactivate the JACK client if running, close it, and
// free the handle, port arrays, and all internal buffers. Resets the
// stream to UNINITIALIZED/STREAM_CLOSED.
2481 void RtApiJack :: closeStream( void )
2483   if ( stream_.state == STREAM_CLOSED ) {
2484     errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2485     error( RtAudioError::WARNING );
2489   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2492     if ( stream_.state == STREAM_RUNNING )
2493       jack_deactivate( handle->client );
2495     jack_client_close( handle->client );
2499     if ( handle->ports[0] ) free( handle->ports[0] );
2500     if ( handle->ports[1] ) free( handle->ports[1] );
2501     pthread_cond_destroy( &handle->condition );
2503     stream_.apiHandle = 0;
2506   for ( int i=0; i<2; i++ ) {
2507     if ( stream_.userBuffer[i] ) {
2508       free( stream_.userBuffer[i] );
2509       stream_.userBuffer[i] = 0;
2513   if ( stream_.deviceBuffer ) {
2514     free( stream_.deviceBuffer );
2515     stream_.deviceBuffer = 0;
2518   stream_.mode = UNINITIALIZED;
2519   stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and, unless autoconnect was
// disabled (RTAUDIO_JACK_DONT_CONNECT), wire our registered ports to the
// device's ports, honoring the channel offset chosen at open time.
2522 void RtApiJack :: startStream( void )
2525   if ( stream_.state == STREAM_RUNNING ) {
2526     errorText_ = "RtApiJack::startStream(): the stream is already running!";
2527     error( RtAudioError::WARNING );
2531   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2532   int result = jack_activate( handle->client );
2534     errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2540   // Get the list of available ports.
2541   if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2543     ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2544     if ( ports == NULL) {
2545       errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2549     // Now make the port connections.  Since RtAudio wasn't designed to
2550     // allow the user to select particular channels of a device, we'll
2551     // just open the first "nChannels" ports with offset.
2552     for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2554       if ( ports[ stream_.channelOffset[0] + i ] )
2555         result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2558         errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Mirror of the block above, for the capture direction.
2565   if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2567     ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2568     if ( ports == NULL) {
2569       errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2573     // Now make the port connections.  See note above.
2574     for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2576       if ( ports[ stream_.channelOffset[1] + i ] )
2577         result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2580         errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2587   handle->drainCounter = 0;
2588   handle->internalDrain = false;
2589   stream_.state = STREAM_RUNNING;
2592   if ( result == 0 ) return;
2593   error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: for output/duplex, set the drain counter and
// block on the condition variable until the process callback signals that
// pending output has been flushed, then deactivate the client.
2596 void RtApiJack :: stopStream( void )
2599   if ( stream_.state == STREAM_STOPPED ) {
2600     errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2601     error( RtAudioError::WARNING );
2605   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2606   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2608     if ( handle->drainCounter == 0 ) {
2609       handle->drainCounter = 2;
2610       pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2614   jack_deactivate( handle->client );
2615   stream_.state = STREAM_STOPPED;
// Abort the stream: like stopStream() but without waiting for output to
// drain — the drain counter is set so the callback stops immediately.
2618 void RtApiJack :: abortStream( void )
2621   if ( stream_.state == STREAM_STOPPED ) {
2622     errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2623     error( RtAudioError::WARNING );
2627   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2628   handle->drainCounter = 2;
2633 // This function will be called by a spawned thread when the user
2634 // callback function signals that the stream should be stopped or
2635 // aborted.  It is necessary to handle it this way because the
2636 // callbackEvent() function must return before the jack_deactivate()
2637 // function will return.
2638 static void *jackStopStream( void *ptr )
2640   CallbackInfo *info = (CallbackInfo *) ptr;
2641   RtApiJack *object = (RtApiJack *) info->object;
2643   object->stopStream();
2644   pthread_exit( NULL );
// Per-period JACK processing: run the user callback, then move audio between
// the user/device buffers and the per-port JACK buffers (with format/
// interleaving conversion when needed). Returns SUCCESS to keep running.
2647 bool RtApiJack :: callbackEvent( unsigned long nframes )
2649   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2650   if ( stream_.state == STREAM_CLOSED ) {
// NOTE(review): these two error strings say "RtApiCore::" but this is
// RtApiJack — looks like a copy/paste slip; confirm before changing the
// user-visible text.
2651     errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2652     error( RtAudioError::WARNING );
2655   if ( stream_.bufferSize != nframes ) {
2656     errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2657     error( RtAudioError::WARNING );
2661   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2662   JackHandle *handle = (JackHandle *) stream_.apiHandle;
2664   // Check if we were draining the stream and signal is finished.
2665   if ( handle->drainCounter > 3 ) {
2666     ThreadHandle threadId;
2668     stream_.state = STREAM_STOPPING;
2669     if ( handle->internalDrain == true )
// Callback-initiated stop must happen on a separate thread (see
// jackStopStream); an external stopStream() is waiting on the condition.
2670       pthread_create( &threadId, NULL, jackStopStream, info );
2672       pthread_cond_signal( &handle->condition );
2676   // Invoke user callback first, to get fresh output data.
2677   if ( handle->drainCounter == 0 ) {
2678     RtAudioCallback callback = (RtAudioCallback) info->callback;
2679     double streamTime = getStreamTime();
2680     RtAudioStreamStatus status = 0;
2681     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2682       status |= RTAUDIO_OUTPUT_UNDERFLOW;
2683       handle->xrun[0] = false;
2685     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2686       status |= RTAUDIO_INPUT_OVERFLOW;
2687       handle->xrun[1] = false;
2689     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2690                                   stream_.bufferSize, streamTime, status, info->userData );
// Return value 2 = abort immediately; 1 = drain output then stop.
2691     if ( cbReturnValue == 2 ) {
2692       stream_.state = STREAM_STOPPING;
2693       handle->drainCounter = 2;
2695       pthread_create( &id, NULL, jackStopStream, info );
2698     else if ( cbReturnValue == 1 ) {
2699       handle->drainCounter = 1;
2700       handle->internalDrain = true;
2704   jack_default_audio_sample_t *jackbuffer;
2705   unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2706   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2708     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2710       for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2711         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2712         memset( jackbuffer, 0, bufferBytes );
2716     else if ( stream_.doConvertBuffer[0] ) {
2718       convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2720       for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2721         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2722         memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2725     else { // no buffer conversion
2726       for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2727         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2728         memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2733   // Don't bother draining input
2734   if ( handle->drainCounter ) {
2735     handle->drainCounter++;
2739   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2741     if ( stream_.doConvertBuffer[1] ) {
2742       for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2743         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2744         memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2746       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2748     else { // no buffer conversion
2749       for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2750         jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2751         memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2757   RtApi::tickStreamTime();
2760 //******************** End of __UNIX_JACK__ *********************//
2763 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2765 // The ASIO API is designed around a callback scheme, so this
2766 // implementation is similar to that used for OS-X CoreAudio and Linux
2767 // Jack. The primary constraint with ASIO is that it only allows
2768 // access to a single driver at a time. Thus, it is not possible to
2769 // have more than one simultaneous RtAudio stream.
2771 // This implementation also requires a number of external ASIO files
2772 // and a few global variables. The ASIO callback scheme does not
2773 // allow for the passing of user data, so we must create a global
2774 // pointer to our callbackInfo structure.
2776 // On unix systems, we make use of a pthread condition variable.
2777 // Since there is no equivalent in Windows, I hacked something based
2778 // on information found in
2779 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2781 #include "asiosys.h"
2783 #include "iasiothiscallresolver.h"
2784 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO callback scheme carries no user-data
// pointer, so the driver list, callbacks, and callback info must be global.
2787 static AsioDrivers drivers;
2788 static ASIOCallbacks asioCallbacks;
2789 static ASIODriverInfo driverInfo;
2790 static CallbackInfo *asioCallbackInfo;
2791 static bool asioXRun;        // set when the driver reports an xrun
// AsioHandle members (struct declaration line elided from this view).
2794   int drainCounter;       // Tracks callback counts when draining
2795   bool internalDrain;     // Indicates if stop is initiated from callback or not.
2796   ASIOBufferInfo *bufferInfos;
2800     :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2803 // Function declarations (definitions at end of section)
2804 static const char* getAsioErrorString( ASIOError result );
2805 static void sampleRateChanged( ASIOSampleRate sRate );
2806 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM in a single-threaded apartment (required by
// ASIO), reset the current driver, and record the window handle ASIO uses
// as its system reference.
2808 RtApiAsio :: RtApiAsio()
2810   // ASIO cannot run in a multi-threaded apartment.  You can call
2811   // CoInitialize beforehand, but it must be for apartment threading
2812   // (in which case, CoInitialize will return S_FALSE here).
2813   coInitialized_ = false;
2814   HRESULT hr = CoInitialize( NULL );
// NOTE(review): the message below misspells "apartment" — user-visible
// string, left unchanged here.
2816     errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2817     error( RtAudioError::WARNING );
2819   coInitialized_ = true;
2821   drivers.removeCurrentDriver();
2822   driverInfo.asioVersion = 2;
2824   // See note in DirectSound implementation about GetDesktopWindow().
2825   driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream and undo CoInitialize if we own it.
2828 RtApiAsio :: ~RtApiAsio()
2830   if ( stream_.state != STREAM_CLOSED ) closeStream();
2831   if ( coInitialized_ ) CoUninitialize();
// Report the number of installed ASIO drivers (each driver = one device).
2834 unsigned int RtApiAsio :: getDeviceCount( void )
2836   return (unsigned int) drivers.asioGetNumDev();
// Probe a single ASIO device (driver) and fill in an RtAudio::DeviceInfo:
// name, channel counts, supported sample rates, native data format, and
// default-device flags.  Because ASIO allows only one driver to be loaded
// at a time, this loads/initializes the driver, queries it, and removes it
// again before returning.  While a stream is open, previously saved probe
// results (devices_) are returned instead.
// NOTE(review): this chunk appears to have lines elided (closing braces /
// early returns are not visible); the error paths below each set errorText_
// and call error(), and in the full source return immediately afterwards.
2839 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2841 RtAudio::DeviceInfo info;
2842 info.probed = false;
2845 unsigned int nDevices = getDeviceCount();
2846 if ( nDevices == 0 ) {
2847 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2848 error( RtAudioError::INVALID_USE );
2852 if ( device >= nDevices ) {
2853 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2854 error( RtAudioError::INVALID_USE );
2858 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2859 if ( stream_.state != STREAM_CLOSED ) {
2860 if ( device >= devices_.size() ) {
2861 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2862 error( RtAudioError::WARNING );
2865 return devices_[ device ];
// Look up the driver name for this device index, then load and
// initialize the driver so it can be queried.
2868 char driverName[32];
2869 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2870 if ( result != ASE_OK ) {
2871 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2872 errorText_ = errorStream_.str();
2873 error( RtAudioError::WARNING );
2877 info.name = driverName;
2879 if ( !drivers.loadDriver( driverName ) ) {
2880 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2881 errorText_ = errorStream_.str();
2882 error( RtAudioError::WARNING );
2886 result = ASIOInit( &driverInfo );
2887 if ( result != ASE_OK ) {
2888 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2889 errorText_ = errorStream_.str();
2890 error( RtAudioError::WARNING );
2894 // Determine the device channel information.
2895 long inputChannels, outputChannels;
2896 result = ASIOGetChannels( &inputChannels, &outputChannels );
2897 if ( result != ASE_OK ) {
2898 drivers.removeCurrentDriver();
2899 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2900 errorText_ = errorStream_.str();
2901 error( RtAudioError::WARNING );
2905 info.outputChannels = outputChannels;
2906 info.inputChannels = inputChannels;
// Duplex capability is limited by the smaller of the two channel counts.
2907 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2908 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2910 // Determine the supported sample rates.
2911 info.sampleRates.clear();
2912 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2913 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2914 if ( result == ASE_OK ) {
2915 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate not exceeding 48000 Hz.
2917 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2918 info.preferredSampleRate = SAMPLE_RATES[i];
2922 // Determine supported data types ... just check first channel and assume rest are the same.
2923 ASIOChannelInfo channelInfo;
2924 channelInfo.channel = 0;
2925 channelInfo.isInput = true;
2926 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2927 result = ASIOGetChannelInfo( &channelInfo );
2928 if ( result != ASE_OK ) {
2929 drivers.removeCurrentDriver();
2930 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2931 errorText_ = errorStream_.str();
2932 error( RtAudioError::WARNING );
// Map the ASIO sample type to the corresponding RtAudio format flag.
// Both MSB and LSB variants map to the same flag; endianness is handled
// elsewhere via byte-swap flags.
2936 info.nativeFormats = 0;
2937 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2938 info.nativeFormats |= RTAUDIO_SINT16;
2939 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2940 info.nativeFormats |= RTAUDIO_SINT32;
2941 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2942 info.nativeFormats |= RTAUDIO_FLOAT32;
2943 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2944 info.nativeFormats |= RTAUDIO_FLOAT64;
2945 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2946 info.nativeFormats |= RTAUDIO_SINT24;
2948 if ( info.outputChannels > 0 )
2949 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2950 if ( info.inputChannels > 0 )
2951 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver so subsequent probes/opens start from a clean state.
2954 drivers.removeCurrentDriver();
// ASIO driver callback: invoked by the driver when a buffer half (index 0
// or 1) is ready.  Recovers the RtApiAsio instance stashed in the global
// asioCallbackInfo and forwards to its callbackEvent().
2958 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2960 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2961 object->callbackEvent( index );
// Snapshot DeviceInfo for every device into devices_.  Called before a
// stream is opened, because getDeviceInfo() cannot probe other drivers
// while an ASIO stream is running.
2964 void RtApiAsio :: saveDeviceInfo( void )
2968 unsigned int nDevices = getDeviceCount();
2969 devices_.resize( nDevices );
2970 for ( unsigned int i=0; i<nDevices; i++ )
2971 devices_[i] = getDeviceInfo( i );
// Open (or extend to duplex) an ASIO stream on `device`.
//   mode        : OUTPUT or INPUT half being configured
//   channels    : user channel count; firstChannel : device channel offset
//   sampleRate  : requested rate; bufferSize : in/out frames per buffer
//   options     : optional RtAudio stream options (e.g. non-interleaved)
// Returns true (SUCCESS) on success; on failure sets errorText_ and
// returns FAILURE after cleanup.
// FIX(review): the ASIOGetSampleRate() call below passed mojibake
// "¤tRate" — an HTML-entity-corrupted "&currentRate" — which does not
// compile; restored the address-of expression.
// NOTE(review): this chunk has elided lines (goto targets, returns,
// closing braces are not all visible); only the corrupted token was
// changed, every other visible token is untouched.
2974 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2975 unsigned int firstChannel, unsigned int sampleRate,
2976 RtAudioFormat format, unsigned int *bufferSize,
2977 RtAudio::StreamOptions *options )
2978 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2980 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2982 // For ASIO, a duplex stream MUST use the same driver.
2983 if ( isDuplexInput && stream_.device[0] != device ) {
2984 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2988 char driverName[32];
2989 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2990 if ( result != ASE_OK ) {
2991 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2992 errorText_ = errorStream_.str();
2996 // Only load the driver once for duplex stream.
2997 if ( !isDuplexInput ) {
2998 // The getDeviceInfo() function will not work when a stream is open
2999 // because ASIO does not allow multiple devices to run at the same
3000 // time. Thus, we'll probe the system before opening a stream and
3001 // save the results for use by getDeviceInfo().
3002 this->saveDeviceInfo();
3004 if ( !drivers.loadDriver( driverName ) ) {
3005 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3006 errorText_ = errorStream_.str();
3010 result = ASIOInit( &driverInfo );
3011 if ( result != ASE_OK ) {
3012 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3013 errorText_ = errorStream_.str();
3018 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3019 bool buffersAllocated = false;
3020 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3021 unsigned int nChannels;
3024 // Check the device channel count.
3025 long inputChannels, outputChannels;
3026 result = ASIOGetChannels( &inputChannels, &outputChannels );
3027 if ( result != ASE_OK ) {
3028 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3029 errorText_ = errorStream_.str();
3033 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3034 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3035 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3036 errorText_ = errorStream_.str();
3039 stream_.nDeviceChannels[mode] = channels;
3040 stream_.nUserChannels[mode] = channels;
3041 stream_.channelOffset[mode] = firstChannel;
3043 // Verify the sample rate is supported.
3044 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3045 if ( result != ASE_OK ) {
3046 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3047 errorText_ = errorStream_.str();
3051 // Get the current sample rate
3052 ASIOSampleRate currentRate;
3053 result = ASIOGetSampleRate( &currentRate );
3054 if ( result != ASE_OK ) {
3055 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3056 errorText_ = errorStream_.str();
3060 // Set the sample rate only if necessary
3061 if ( currentRate != sampleRate ) {
3062 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3063 if ( result != ASE_OK ) {
3064 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3065 errorText_ = errorStream_.str();
3070 // Determine the driver data type.
3071 ASIOChannelInfo channelInfo;
3072 channelInfo.channel = 0;
3073 if ( mode == OUTPUT ) channelInfo.isInput = false;
3074 else channelInfo.isInput = true;
3075 result = ASIOGetChannelInfo( &channelInfo );
3076 if ( result != ASE_OK ) {
3077 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3078 errorText_ = errorStream_.str();
3082 // Assuming WINDOWS host is always little-endian.
3083 stream_.doByteSwap[mode] = false;
3084 stream_.userFormat = format;
3085 stream_.deviceFormat[mode] = 0;
3086 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3087 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3088 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3090 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3091 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3092 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3094 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3095 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3096 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3098 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3099 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3100 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3102 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3103 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3104 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3107 if ( stream_.deviceFormat[mode] == 0 ) {
3108 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3109 errorText_ = errorStream_.str();
3113 // Set the buffer size. For a duplex stream, this will end up
3114 // setting the buffer size based on the input constraints, which
3116 long minSize, maxSize, preferSize, granularity;
3117 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3118 if ( result != ASE_OK ) {
3119 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3120 errorText_ = errorStream_.str();
3124 if ( isDuplexInput ) {
3125 // When this is the duplex input (output was opened before), then we have to use the same
3126 // buffersize as the output, because it might use the preferred buffer size, which most
3127 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3128 // So instead of throwing an error, make them equal. The caller uses the reference
3129 // to the "bufferSize" param as usual to set up processing buffers.
3131 *bufferSize = stream_.bufferSize;
3134 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3135 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3136 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3137 else if ( granularity == -1 ) {
3138 // Make sure bufferSize is a power of two.
3139 int log2_of_min_size = 0;
3140 int log2_of_max_size = 0;
3142 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3143 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3144 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3147 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3148 int min_delta_num = log2_of_min_size;
3150 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3151 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3152 if (current_delta < min_delta) {
3153 min_delta = current_delta;
3158 *bufferSize = ( (unsigned int)1 << min_delta_num );
3159 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3160 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3162 else if ( granularity != 0 ) {
3163 // Set to an even multiple of granularity, rounding up.
3164 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3169 // we don't use it anymore, see above!
3170 // Just left it here for the case...
3171 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3172 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3177 stream_.bufferSize = *bufferSize;
3178 stream_.nBuffers = 2;
3180 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3181 else stream_.userInterleaved = true;
3183 // ASIO always uses non-interleaved buffers.
3184 stream_.deviceInterleaved[mode] = false;
3186 // Allocate, if necessary, our AsioHandle structure for the stream.
3187 if ( handle == 0 ) {
3189 handle = new AsioHandle;
3191 catch ( std::bad_alloc& ) {
3192 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3195 handle->bufferInfos = 0;
3197 // Create a manual-reset event.
3198 handle->condition = CreateEvent( NULL, // no security
3199 TRUE, // manual-reset
3200 FALSE, // non-signaled initially
3202 stream_.apiHandle = (void *) handle;
3205 // Create the ASIO internal buffers. Since RtAudio sets up input
3206 // and output separately, we'll have to dispose of previously
3207 // created output buffers for a duplex stream.
3208 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3209 ASIODisposeBuffers();
3210 if ( handle->bufferInfos ) free( handle->bufferInfos );
3213 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3215 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3216 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3217 if ( handle->bufferInfos == NULL ) {
3218 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3219 errorText_ = errorStream_.str();
3223 ASIOBufferInfo *infos;
3224 infos = handle->bufferInfos;
3225 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3226 infos->isInput = ASIOFalse;
3227 infos->channelNum = i + stream_.channelOffset[0];
3228 infos->buffers[0] = infos->buffers[1] = 0;
3230 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3231 infos->isInput = ASIOTrue;
3232 infos->channelNum = i + stream_.channelOffset[1];
3233 infos->buffers[0] = infos->buffers[1] = 0;
3236 // prepare for callbacks
3237 stream_.sampleRate = sampleRate;
3238 stream_.device[mode] = device;
3239 stream_.mode = isDuplexInput ? DUPLEX : mode;
3241 // store this class instance before registering callbacks, that are going to use it
3242 asioCallbackInfo = &stream_.callbackInfo;
3243 stream_.callbackInfo.object = (void *) this;
3245 // Set up the ASIO callback structure and create the ASIO data buffers.
3246 asioCallbacks.bufferSwitch = &bufferSwitch;
3247 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3248 asioCallbacks.asioMessage = &asioMessages;
3249 asioCallbacks.bufferSwitchTimeInfo = NULL;
3250 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3251 if ( result != ASE_OK ) {
3252 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3253 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3254 // in that case, let's be naïve and try that instead
3255 *bufferSize = preferSize;
3256 stream_.bufferSize = *bufferSize;
3257 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3260 if ( result != ASE_OK ) {
3261 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3262 errorText_ = errorStream_.str();
3265 buffersAllocated = true;
3266 stream_.state = STREAM_STOPPED;
3268 // Set flags for buffer conversion.
3269 stream_.doConvertBuffer[mode] = false;
3270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3271 stream_.doConvertBuffer[mode] = true;
3272 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3273 stream_.nUserChannels[mode] > 1 )
3274 stream_.doConvertBuffer[mode] = true;
3276 // Allocate necessary internal buffers
3277 unsigned long bufferBytes;
3278 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3279 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3280 if ( stream_.userBuffer[mode] == NULL ) {
3281 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3285 if ( stream_.doConvertBuffer[mode] ) {
3287 bool makeBuffer = true;
3288 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3289 if ( isDuplexInput && stream_.deviceBuffer ) {
3290 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3291 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3295 bufferBytes *= *bufferSize;
3296 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3297 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3298 if ( stream_.deviceBuffer == NULL ) {
3299 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3305 // Determine device latencies
3306 long inputLatency, outputLatency;
3307 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3308 if ( result != ASE_OK ) {
3309 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3310 errorText_ = errorStream_.str();
3311 error( RtAudioError::WARNING); // warn but don't fail
3314 stream_.latency[0] = outputLatency;
3315 stream_.latency[1] = inputLatency;
3318 // Setup the buffer conversion information structure. We don't use
3319 // buffers to do channel offsets, so we override that parameter
3321 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3326 if ( !isDuplexInput ) {
3327 // the cleanup for error in the duplex input, is done by RtApi::openStream
3328 // So we clean up for single channel only
3330 if ( buffersAllocated )
3331 ASIODisposeBuffers();
3333 drivers.removeCurrentDriver();
3336 CloseHandle( handle->condition );
3337 if ( handle->bufferInfos )
3338 free( handle->bufferInfos );
3341 stream_.apiHandle = 0;
3345 if ( stream_.userBuffer[mode] ) {
3346 free( stream_.userBuffer[mode] );
3347 stream_.userBuffer[mode] = 0;
3350 if ( stream_.deviceBuffer ) {
3351 free( stream_.deviceBuffer );
3352 stream_.deviceBuffer = 0;
3357 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open ASIO stream: stop it if running, dispose the driver's
// buffers, unload the driver, release the Win32 event and handle struct,
// and free all user/device conversion buffers.
3359 void RtApiAsio :: closeStream()
3361 if ( stream_.state == STREAM_CLOSED ) {
3362 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3363 error( RtAudioError::WARNING );
3367 if ( stream_.state == STREAM_RUNNING ) {
3368 stream_.state = STREAM_STOPPED;
3371 ASIODisposeBuffers();
3372 drivers.removeCurrentDriver();
3374 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3376 CloseHandle( handle->condition );
3377 if ( handle->bufferInfos )
3378 free( handle->bufferInfos );
3380 stream_.apiHandle = 0;
// Free both the output (index 0) and input (index 1) user buffers.
3383 for ( int i=0; i<2; i++ ) {
3384 if ( stream_.userBuffer[i] ) {
3385 free( stream_.userBuffer[i] );
3386 stream_.userBuffer[i] = 0;
3390 if ( stream_.deviceBuffer ) {
3391 free( stream_.deviceBuffer );
3392 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed state.
3395 stream_.mode = UNINITIALIZED;
3396 stream_.state = STREAM_CLOSED;
// File-scope flag: set when a stop has been requested via the spawned
// asioStopStream thread; cleared again in startStream().
3399 bool stopThreadCalled = false;
// Start the opened ASIO stream.  Resets the drain state and the
// manual-reset event used by stopStream() to wait for output drain.
3401 void RtApiAsio :: startStream()
3404 if ( stream_.state == STREAM_RUNNING ) {
3405 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3406 error( RtAudioError::WARNING );
3410 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3411 ASIOError result = ASIOStart();
3412 if ( result != ASE_OK ) {
3413 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3414 errorText_ = errorStream_.str();
// Reset callback drain bookkeeping before entering the running state.
3418 handle->drainCounter = 0;
3419 handle->internalDrain = false;
3420 ResetEvent( handle->condition );
3421 stream_.state = STREAM_RUNNING;
3425 stopThreadCalled = false;
3427 if ( result == ASE_OK ) return;
3428 error( RtAudioError::SYSTEM_ERROR );
// Stop the running ASIO stream.  For output/duplex streams, first lets
// the callback drain the output (drainCounter = 2) and blocks on the
// handle's event until the callback signals completion.
3431 void RtApiAsio :: stopStream()
3434 if ( stream_.state == STREAM_STOPPED ) {
3435 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3436 error( RtAudioError::WARNING );
3440 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3441 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3442 if ( handle->drainCounter == 0 ) {
3443 handle->drainCounter = 2;
3444 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3448 stream_.state = STREAM_STOPPED;
3450 ASIOError result = ASIOStop();
3451 if ( result != ASE_OK ) {
3452 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3453 errorText_ = errorStream_.str();
3456 if ( result == ASE_OK ) return;
3457 error( RtAudioError::SYSTEM_ERROR );
// Abort the running stream.  For ASIO this is identical to stopStream();
// see the comment below for why the immediate-drain path was disabled.
3460 void RtApiAsio :: abortStream()
3463 if ( stream_.state == STREAM_STOPPED ) {
3464 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3465 error( RtAudioError::WARNING );
3469 // The following lines were commented-out because some behavior was
3470 // noted where the device buffers need to be zeroed to avoid
3471 // continuing sound, even when the device buffers are completely
3472 // disposed. So now, calling abort is the same as calling stop.
3473 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3474 // handle->drainCounter = 2;
3478 // This function will be called by a spawned thread when the user
3479 // callback function signals that the stream should be stopped or
3480 // aborted. It is necessary to handle it this way because the
3481 // callbackEvent() function must return before the ASIOStop()
3482 // function will return.
// Thread entry point (Win32 _beginthreadex signature): recover the
// RtApiAsio instance from the CallbackInfo and call stopStream() on it.
3483 static unsigned __stdcall asioStopStream( void *ptr )
3485 CallbackInfo *info = (CallbackInfo *) ptr;
3486 RtApiAsio *object = (RtApiAsio *) info->object;
3488 object->stopStream();
// Per-buffer processing driven by the ASIO bufferSwitch callback.
//   bufferIndex : which half (0/1) of the driver's double buffer to use.
// Handles: drain/stop signaling, invoking the user callback, format and
// interleave conversion, byte swapping, and copying between the user
// buffers and the driver's per-channel (non-interleaved) buffers.
// NOTE(review): lines are elided in this chunk (several closing braces
// and a trailing ASIOOutputReady()/return are not visible).
3493 bool RtApiAsio :: callbackEvent( long bufferIndex )
3495 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3496 if ( stream_.state == STREAM_CLOSED ) {
3497 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3498 error( RtAudioError::WARNING );
3502 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3503 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3505 // Check if we were draining the stream and signal if finished.
3506 if ( handle->drainCounter > 3 ) {
3508 stream_.state = STREAM_STOPPING;
3509 if ( handle->internalDrain == false )
// External stop (stopStream) is blocked on this event; wake it.
3510 SetEvent( handle->condition );
3511 else { // spawn a thread to stop the stream
3513 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3514 &stream_.callbackInfo, 0, &threadId );
3519 // Invoke user callback to get fresh output data UNLESS we are
3521 if ( handle->drainCounter == 0 ) {
3522 RtAudioCallback callback = (RtAudioCallback) info->callback;
3523 double streamTime = getStreamTime();
3524 RtAudioStreamStatus status = 0;
3525 if ( stream_.mode != INPUT && asioXRun == true ) {
3526 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3529 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3530 status |= RTAUDIO_INPUT_OVERFLOW;
3533 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3534 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain output then stop.
3535 if ( cbReturnValue == 2 ) {
3536 stream_.state = STREAM_STOPPING;
3537 handle->drainCounter = 2;
3539 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3540 &stream_.callbackInfo, 0, &threadId );
3543 else if ( cbReturnValue == 1 ) {
3544 handle->drainCounter = 1;
3545 handle->internalDrain = true;
3549 unsigned int nChannels, bufferBytes, i, j;
3550 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3551 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3553 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3555 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3557 for ( i=0, j=0; i<nChannels; i++ ) {
3558 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3559 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3563 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleave to device format, then scatter to
// the driver's per-channel output buffers.
3565 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3566 if ( stream_.doByteSwap[0] )
3567 byteSwapBuffer( stream_.deviceBuffer,
3568 stream_.bufferSize * stream_.nDeviceChannels[0],
3569 stream_.deviceFormat[0] );
3571 for ( i=0, j=0; i<nChannels; i++ ) {
3572 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3573 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3574 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy straight from the user buffer.
3580 if ( stream_.doByteSwap[0] )
3581 byteSwapBuffer( stream_.userBuffer[0],
3582 stream_.bufferSize * stream_.nUserChannels[0],
3583 stream_.userFormat );
3585 for ( i=0, j=0; i<nChannels; i++ ) {
3586 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3587 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3588 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3594 // Don't bother draining input
3595 if ( handle->drainCounter ) {
3596 handle->drainCounter++;
3600 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3602 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3604 if (stream_.doConvertBuffer[1]) {
3606 // Always interleave ASIO input data.
3607 for ( i=0, j=0; i<nChannels; i++ ) {
3608 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3609 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3610 handle->bufferInfos[i].buffers[bufferIndex],
3614 if ( stream_.doByteSwap[1] )
3615 byteSwapBuffer( stream_.deviceBuffer,
3616 stream_.bufferSize * stream_.nDeviceChannels[1],
3617 stream_.deviceFormat[1] );
3618 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion needed: gather device channels directly into the
// user input buffer.
3622 for ( i=0, j=0; i<nChannels; i++ ) {
3623 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3624 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3625 handle->bufferInfos[i].buffers[bufferIndex],
3630 if ( stream_.doByteSwap[1] )
3631 byteSwapBuffer( stream_.userBuffer[1],
3632 stream_.bufferSize * stream_.nUserChannels[1],
3633 stream_.userFormat );
3638 // The following call was suggested by Malte Clasen. While the API
3639 // documentation indicates it should not be required, some device
3640 // drivers apparently do not function correctly without it.
3643 RtApi::tickStreamTime();
// ASIO driver callback: the driver reports a sample-rate change.  RtAudio
// responds by stopping the stream (it cannot adapt on the fly) and
// logging the event to stderr.
3647 static void sampleRateChanged( ASIOSampleRate sRate )
3649 // The ASIO documentation says that this usually only happens during
3650 // external sync. Audio processing is not stopped by the driver,
3651 // actual sample rate might not have even changed, maybe only the
3652 // sample rate status of an AES/EBU or S/PDIF digital input at the
3655 RtApi *object = (RtApi *) asioCallbackInfo->object;
3657 object->stopStream();
3659 catch ( RtAudioError &exception ) {
3660 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3664 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver message callback.  The driver queries host capabilities and
// posts notifications through this single dispatcher; the return value
// (elided in this chunk) indicates whether each selector is handled.
3667 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3671 switch( selector ) {
3672 case kAsioSelectorSupported:
3673 if ( value == kAsioResetRequest
3674 || value == kAsioEngineVersion
3675 || value == kAsioResyncRequest
3676 || value == kAsioLatenciesChanged
3677 // The following three were added for ASIO 2.0, you don't
3678 // necessarily have to support them.
3679 || value == kAsioSupportsTimeInfo
3680 || value == kAsioSupportsTimeCode
3681 || value == kAsioSupportsInputMonitor)
3684 case kAsioResetRequest:
3685 // Defer the task and perform the reset of the driver during the
3686 // next "safe" situation. You cannot reset the driver right now,
3687 // as this code is called from the driver. Resetting the driver
3688 // is done by completely destructing it, i.e. ASIOStop(),
3689 // ASIODisposeBuffers(), destruction. Afterwards you re-initialize the
3691 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3694 case kAsioResyncRequest:
3695 // This informs the application that the driver encountered some
3696 // non-fatal data loss. It is used for synchronization purposes
3697 // of different media. Added mainly to work around the Win16Mutex
3698 // problems in Windows 95/98 with the Windows Multimedia system,
3699 // which could lose data because the Mutex was held too long by
3700 // another thread. However a driver can issue it in other
3702 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3706 case kAsioLatenciesChanged:
3707 // This will inform the host application that the driver's
3708 // latencies changed. Beware, this does not mean that the
3709 // buffer sizes have changed! You might need to update internal
3711 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3714 case kAsioEngineVersion:
3715 // Return the supported ASIO version of the host application. If
3716 // a host application does not implement this selector, ASIO 1.0
3717 // is assumed by the driver.
3720 case kAsioSupportsTimeInfo:
3721 // Informs the driver whether the
3722 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3723 // For compatibility with ASIO 1.0 drivers the host application
3724 // should always support the "old" bufferSwitch method, too.
3727 case kAsioSupportsTimeCode:
3728 // Informs the driver whether application is interested in time
3729 // code info. If an application does not need to know about time
3730 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message via a small static
// lookup table; unknown codes fall through to a generic string.
3737 static const char* getAsioErrorString( ASIOError result )
3745 static const Messages m[] =
3747 { ASE_NotPresent, "Hardware input or output is not present or available." },
3748 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3749 { ASE_InvalidParameter, "Invalid input parameter." },
3750 { ASE_InvalidMode, "Invalid mode." },
3751 { ASE_SPNotAdvancing, "Sample position not advancing." },
3752 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3753 { ASE_NoMemory, "Not enough memory to complete the request." }
3756 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3757 if ( m[i].value == result ) return m[i].message;
3759 return "Unknown error.";
3762 //******************** End of __WINDOWS_ASIO__ *********************//
3766 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3768 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3769 // - Introduces support for the Windows WASAPI API
3770 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3771 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3772 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3777 #include <audioclient.h>
3779 #include <mmdeviceapi.h>
3780 #include <functiondiscoverykeys_devpkey.h>
3783 //=============================================================================
3785 #define SAFE_RELEASE( objectPtr )\
3788 objectPtr->Release();\
3792 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3794 //-----------------------------------------------------------------------------
3796 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3797 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3798 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3799 // provide intermediate storage for read / write synchronization.
3813 // sets the length of the internal ring buffer
// (Re)allocate the ring-buffer storage as bufferSize elements of
// formatBytes bytes each, zero-initialized, and record the new length.
// NOTE(review): lines elided here — the full source also frees the old
// buffer_ and handles allocation failure; not visible in this chunk.
3814 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3817 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3819 bufferSize_ = bufferSize;
3824 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies bufferSize samples of the given format from `buffer` into the
// ring at inIndex_, wrapping at bufferSize_.  Returns false (without
// copying) when the incoming data is empty, too large, or would overrun
// the unread region between inIndex_ and outIndex_.
3825 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3827 if ( !buffer || // incoming buffer is NULL
3828 bufferSize == 0 || // incoming buffer has no data
3829 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below works across the
// ring's wrap point.
3834 unsigned int relOutIndex = outIndex_;
3835 unsigned int inIndexEnd = inIndex_ + bufferSize;
3836 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3837 relOutIndex += bufferSize_;
3840 // "in" index can end on the "out" index but cannot begin at it
3841 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3842 return false; // not enough space between "in" index and "out" index
3845 // copy buffer from external to internal
// Split the copy into the run up to the end of the ring (fromInSize)
// and the wrapped remainder from index 0 (fromZeroSize).
3846 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3847 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3848 int fromInSize = bufferSize - fromZeroSize;
// Element size depends on the sample format (switch structure partially
// elided in this chunk).
3853 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3854 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3856 case RTAUDIO_SINT16:
3857 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3858 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3860 case RTAUDIO_SINT24:
3861 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3862 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3864 case RTAUDIO_SINT32:
3865 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3866 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3868 case RTAUDIO_FLOAT32:
3869 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3870 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3872 case RTAUDIO_FLOAT64:
3873 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3874 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3878 // update "in" index
3879 inIndex_ += bufferSize;
3880 inIndex_ %= bufferSize_;
3885 // attempt to pull a buffer from the ring buffer from the current "out" index
3886 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3888 if ( !buffer || // incoming buffer is NULL
3889 bufferSize == 0 || // incoming buffer has no data
3890 bufferSize > bufferSize_ ) // incoming buffer too large
3895 unsigned int relInIndex = inIndex_;
3896 unsigned int outIndexEnd = outIndex_ + bufferSize;
3897 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3898 relInIndex += bufferSize_;
3901 // "out" index can begin at and end on the "in" index
3902 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3903 return false; // not enough space between "out" index and "in" index
3906 // copy buffer from internal to external
3907 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3908 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3909 int fromOutSize = bufferSize - fromZeroSize;
3914 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3915 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3917 case RTAUDIO_SINT16:
3918 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3919 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3921 case RTAUDIO_SINT24:
3922 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3923 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3925 case RTAUDIO_SINT32:
3926 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3927 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3929 case RTAUDIO_FLOAT32:
3930 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3931 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3933 case RTAUDIO_FLOAT64:
3934 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3935 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3939 // update "out" index
3940 outIndex_ += bufferSize;
3941 outIndex_ %= bufferSize_;
3948 unsigned int bufferSize_;
3949 unsigned int inIndex_;
3950 unsigned int outIndex_;
3953 //-----------------------------------------------------------------------------
3955 // A structure to hold various information related to the WASAPI implementation.
3958 IAudioClient* captureAudioClient;
3959 IAudioClient* renderAudioClient;
3960 IAudioCaptureClient* captureClient;
3961 IAudioRenderClient* renderClient;
3962 HANDLE captureEvent;
3966 : captureAudioClient( NULL ),
3967 renderAudioClient( NULL ),
3968 captureClient( NULL ),
3969 renderClient( NULL ),
3970 captureEvent( NULL ),
3971 renderEvent( NULL ) {}
3974 //=============================================================================
3976 RtApiWasapi::RtApiWasapi()
3977 : coInitialized_( false ), deviceEnumerator_( NULL )
3979 // WASAPI can run either apartment or multi-threaded
3980 HRESULT hr = CoInitialize( NULL );
3981 if ( !FAILED( hr ) )
3982 coInitialized_ = true;
3984 // Instantiate device enumerator
3985 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3986 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3987 ( void** ) &deviceEnumerator_ );
3989 if ( FAILED( hr ) ) {
3990 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3991 error( RtAudioError::DRIVER_ERROR );
3995 //-----------------------------------------------------------------------------
3997 RtApiWasapi::~RtApiWasapi()
3999 if ( stream_.state != STREAM_CLOSED )
4002 SAFE_RELEASE( deviceEnumerator_ );
4004 // If this object previously called CoInitialize()
4005 if ( coInitialized_ )
4009 //=============================================================================
4011 unsigned int RtApiWasapi::getDeviceCount( void )
4013 unsigned int captureDeviceCount = 0;
4014 unsigned int renderDeviceCount = 0;
4016 IMMDeviceCollection* captureDevices = NULL;
4017 IMMDeviceCollection* renderDevices = NULL;
4019 // Count capture devices
4021 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4022 if ( FAILED( hr ) ) {
4023 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4027 hr = captureDevices->GetCount( &captureDeviceCount );
4028 if ( FAILED( hr ) ) {
4029 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4033 // Count render devices
4034 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4035 if ( FAILED( hr ) ) {
4036 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4040 hr = renderDevices->GetCount( &renderDeviceCount );
4041 if ( FAILED( hr ) ) {
4042 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4047 // release all references
4048 SAFE_RELEASE( captureDevices );
4049 SAFE_RELEASE( renderDevices );
4051 if ( errorText_.empty() )
4052 return captureDeviceCount + renderDeviceCount;
4054 error( RtAudioError::DRIVER_ERROR );
4058 //-----------------------------------------------------------------------------
4060 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4062 RtAudio::DeviceInfo info;
4063 unsigned int captureDeviceCount = 0;
4064 unsigned int renderDeviceCount = 0;
4065 std::string defaultDeviceName;
4066 bool isCaptureDevice = false;
4068 PROPVARIANT deviceNameProp;
4069 PROPVARIANT defaultDeviceNameProp;
4071 IMMDeviceCollection* captureDevices = NULL;
4072 IMMDeviceCollection* renderDevices = NULL;
4073 IMMDevice* devicePtr = NULL;
4074 IMMDevice* defaultDevicePtr = NULL;
4075 IAudioClient* audioClient = NULL;
4076 IPropertyStore* devicePropStore = NULL;
4077 IPropertyStore* defaultDevicePropStore = NULL;
4079 WAVEFORMATEX* deviceFormat = NULL;
4080 WAVEFORMATEX* closestMatchFormat = NULL;
4083 info.probed = false;
4085 // Count capture devices
4087 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4088 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4089 if ( FAILED( hr ) ) {
4090 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4094 hr = captureDevices->GetCount( &captureDeviceCount );
4095 if ( FAILED( hr ) ) {
4096 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4100 // Count render devices
4101 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4102 if ( FAILED( hr ) ) {
4103 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4107 hr = renderDevices->GetCount( &renderDeviceCount );
4108 if ( FAILED( hr ) ) {
4109 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4113 // validate device index
4114 if ( device >= captureDeviceCount + renderDeviceCount ) {
4115 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4116 errorType = RtAudioError::INVALID_USE;
4120 // determine whether index falls within capture or render devices
4121 if ( device >= renderDeviceCount ) {
4122 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4123 if ( FAILED( hr ) ) {
4124 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4127 isCaptureDevice = true;
4130 hr = renderDevices->Item( device, &devicePtr );
4131 if ( FAILED( hr ) ) {
4132 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4135 isCaptureDevice = false;
4138 // get default device name
4139 if ( isCaptureDevice ) {
4140 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4141 if ( FAILED( hr ) ) {
4142 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4147 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4148 if ( FAILED( hr ) ) {
4149 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4154 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4155 if ( FAILED( hr ) ) {
4156 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4159 PropVariantInit( &defaultDeviceNameProp );
4161 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4162 if ( FAILED( hr ) ) {
4163 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4167 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4170 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4171 if ( FAILED( hr ) ) {
4172 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4176 PropVariantInit( &deviceNameProp );
4178 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4179 if ( FAILED( hr ) ) {
4180 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4184 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4187 if ( isCaptureDevice ) {
4188 info.isDefaultInput = info.name == defaultDeviceName;
4189 info.isDefaultOutput = false;
4192 info.isDefaultInput = false;
4193 info.isDefaultOutput = info.name == defaultDeviceName;
4197 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4198 if ( FAILED( hr ) ) {
4199 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4203 hr = audioClient->GetMixFormat( &deviceFormat );
4204 if ( FAILED( hr ) ) {
4205 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4209 if ( isCaptureDevice ) {
4210 info.inputChannels = deviceFormat->nChannels;
4211 info.outputChannels = 0;
4212 info.duplexChannels = 0;
4215 info.inputChannels = 0;
4216 info.outputChannels = deviceFormat->nChannels;
4217 info.duplexChannels = 0;
4220 // sample rates (WASAPI only supports the one native sample rate)
4221 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4223 info.sampleRates.clear();
4224 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
4227 info.nativeFormats = 0;
4229 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4230 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4231 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4233 if ( deviceFormat->wBitsPerSample == 32 ) {
4234 info.nativeFormats |= RTAUDIO_FLOAT32;
4236 else if ( deviceFormat->wBitsPerSample == 64 ) {
4237 info.nativeFormats |= RTAUDIO_FLOAT64;
4240 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4241 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4242 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4244 if ( deviceFormat->wBitsPerSample == 8 ) {
4245 info.nativeFormats |= RTAUDIO_SINT8;
4247 else if ( deviceFormat->wBitsPerSample == 16 ) {
4248 info.nativeFormats |= RTAUDIO_SINT16;
4250 else if ( deviceFormat->wBitsPerSample == 24 ) {
4251 info.nativeFormats |= RTAUDIO_SINT24;
4253 else if ( deviceFormat->wBitsPerSample == 32 ) {
4254 info.nativeFormats |= RTAUDIO_SINT32;
4262 // release all references
4263 PropVariantClear( &deviceNameProp );
4264 PropVariantClear( &defaultDeviceNameProp );
4266 SAFE_RELEASE( captureDevices );
4267 SAFE_RELEASE( renderDevices );
4268 SAFE_RELEASE( devicePtr );
4269 SAFE_RELEASE( defaultDevicePtr );
4270 SAFE_RELEASE( audioClient );
4271 SAFE_RELEASE( devicePropStore );
4272 SAFE_RELEASE( defaultDevicePropStore );
4274 CoTaskMemFree( deviceFormat );
4275 CoTaskMemFree( closestMatchFormat );
4277 if ( !errorText_.empty() )
4282 //-----------------------------------------------------------------------------
4284 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4286 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4287 if ( getDeviceInfo( i ).isDefaultOutput ) {
4295 //-----------------------------------------------------------------------------
4297 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4299 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4300 if ( getDeviceInfo( i ).isDefaultInput ) {
4308 //-----------------------------------------------------------------------------
4310 void RtApiWasapi::closeStream( void )
4312 if ( stream_.state == STREAM_CLOSED ) {
4313 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4314 error( RtAudioError::WARNING );
4318 if ( stream_.state != STREAM_STOPPED )
4321 // clean up stream memory
4322 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4323 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4325 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4326 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4328 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4329 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4331 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4332 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4334 delete ( WasapiHandle* ) stream_.apiHandle;
4335 stream_.apiHandle = NULL;
4337 for ( int i = 0; i < 2; i++ ) {
4338 if ( stream_.userBuffer[i] ) {
4339 free( stream_.userBuffer[i] );
4340 stream_.userBuffer[i] = 0;
4344 if ( stream_.deviceBuffer ) {
4345 free( stream_.deviceBuffer );
4346 stream_.deviceBuffer = 0;
4349 // update stream state
4350 stream_.state = STREAM_CLOSED;
4353 //-----------------------------------------------------------------------------
4355 void RtApiWasapi::startStream( void )
4359 if ( stream_.state == STREAM_RUNNING ) {
4360 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4361 error( RtAudioError::WARNING );
4365 // update stream state
4366 stream_.state = STREAM_RUNNING;
4368 // create WASAPI stream thread
4369 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4371 if ( !stream_.callbackInfo.thread ) {
4372 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4373 error( RtAudioError::THREAD_ERROR );
4376 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4377 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4381 //-----------------------------------------------------------------------------
4383 void RtApiWasapi::stopStream( void )
4387 if ( stream_.state == STREAM_STOPPED ) {
4388 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4389 error( RtAudioError::WARNING );
4393 // inform stream thread by setting stream state to STREAM_STOPPING
4394 stream_.state = STREAM_STOPPING;
4396 // wait until stream thread is stopped
4397 while( stream_.state != STREAM_STOPPED ) {
4401 // Wait for the last buffer to play before stopping.
4402 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4404 // stop capture client if applicable
4405 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4406 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4407 if ( FAILED( hr ) ) {
4408 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4409 error( RtAudioError::DRIVER_ERROR );
4414 // stop render client if applicable
4415 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4416 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4417 if ( FAILED( hr ) ) {
4418 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4419 error( RtAudioError::DRIVER_ERROR );
4424 // close thread handle
4425 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4426 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4427 error( RtAudioError::THREAD_ERROR );
4431 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4434 //-----------------------------------------------------------------------------
4436 void RtApiWasapi::abortStream( void )
4440 if ( stream_.state == STREAM_STOPPED ) {
4441 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4442 error( RtAudioError::WARNING );
4446 // inform stream thread by setting stream state to STREAM_STOPPING
4447 stream_.state = STREAM_STOPPING;
4449 // wait until stream thread is stopped
4450 while ( stream_.state != STREAM_STOPPED ) {
4454 // stop capture client if applicable
4455 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4456 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4457 if ( FAILED( hr ) ) {
4458 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4459 error( RtAudioError::DRIVER_ERROR );
4464 // stop render client if applicable
4465 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4466 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4467 if ( FAILED( hr ) ) {
4468 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4469 error( RtAudioError::DRIVER_ERROR );
4474 // close thread handle
4475 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4476 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4477 error( RtAudioError::THREAD_ERROR );
4481 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4484 //-----------------------------------------------------------------------------
4486 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4487 unsigned int firstChannel, unsigned int sampleRate,
4488 RtAudioFormat format, unsigned int* bufferSize,
4489 RtAudio::StreamOptions* options )
4491 bool methodResult = FAILURE;
4492 unsigned int captureDeviceCount = 0;
4493 unsigned int renderDeviceCount = 0;
4495 IMMDeviceCollection* captureDevices = NULL;
4496 IMMDeviceCollection* renderDevices = NULL;
4497 IMMDevice* devicePtr = NULL;
4498 WAVEFORMATEX* deviceFormat = NULL;
4499 unsigned int bufferBytes;
4500 stream_.state = STREAM_STOPPED;
4501 RtAudio::DeviceInfo deviceInfo;
4503 // create API Handle if not already created
4504 if ( !stream_.apiHandle )
4505 stream_.apiHandle = ( void* ) new WasapiHandle();
4507 // Count capture devices
4509 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4510 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4511 if ( FAILED( hr ) ) {
4512 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4516 hr = captureDevices->GetCount( &captureDeviceCount );
4517 if ( FAILED( hr ) ) {
4518 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4522 // Count render devices
4523 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4524 if ( FAILED( hr ) ) {
4525 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4529 hr = renderDevices->GetCount( &renderDeviceCount );
4530 if ( FAILED( hr ) ) {
4531 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4535 // validate device index
4536 if ( device >= captureDeviceCount + renderDeviceCount ) {
4537 errorType = RtAudioError::INVALID_USE;
4538 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4542 deviceInfo = getDeviceInfo( device );
4544 // validate sample rate
4545 if ( sampleRate != deviceInfo.preferredSampleRate )
4547 errorType = RtAudioError::INVALID_USE;
4548 std::stringstream ss;
4549 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4550 << "Hz sample rate not supported. This device only supports "
4551 << deviceInfo.preferredSampleRate << "Hz.";
4552 errorText_ = ss.str();
4556 // determine whether index falls within capture or render devices
4557 if ( device >= renderDeviceCount ) {
4558 if ( mode != INPUT ) {
4559 errorType = RtAudioError::INVALID_USE;
4560 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4564 // retrieve captureAudioClient from devicePtr
4565 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4567 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4568 if ( FAILED( hr ) ) {
4569 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4573 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4574 NULL, ( void** ) &captureAudioClient );
4575 if ( FAILED( hr ) ) {
4576 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4580 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4581 if ( FAILED( hr ) ) {
4582 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4586 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4587 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4590 if ( mode != OUTPUT ) {
4591 errorType = RtAudioError::INVALID_USE;
4592 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4596 // retrieve renderAudioClient from devicePtr
4597 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4599 hr = renderDevices->Item( device, &devicePtr );
4600 if ( FAILED( hr ) ) {
4601 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4605 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4606 NULL, ( void** ) &renderAudioClient );
4607 if ( FAILED( hr ) ) {
4608 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4612 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4613 if ( FAILED( hr ) ) {
4614 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4618 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4619 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4623 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4624 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4625 stream_.mode = DUPLEX;
4628 stream_.mode = mode;
4631 stream_.device[mode] = device;
4632 stream_.doByteSwap[mode] = false;
4633 stream_.sampleRate = sampleRate;
4634 stream_.bufferSize = *bufferSize;
4635 stream_.nBuffers = 1;
4636 stream_.nUserChannels[mode] = channels;
4637 stream_.channelOffset[mode] = firstChannel;
4638 stream_.userFormat = format;
4639 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4641 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4642 stream_.userInterleaved = false;
4644 stream_.userInterleaved = true;
4645 stream_.deviceInterleaved[mode] = true;
4647 // Set flags for buffer conversion.
4648 stream_.doConvertBuffer[mode] = false;
4649 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4650 stream_.nUserChannels != stream_.nDeviceChannels )
4651 stream_.doConvertBuffer[mode] = true;
4652 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4653 stream_.nUserChannels[mode] > 1 )
4654 stream_.doConvertBuffer[mode] = true;
4656 if ( stream_.doConvertBuffer[mode] )
4657 setConvertInfo( mode, 0 );
4659 // Allocate necessary internal buffers
4660 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4662 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4663 if ( !stream_.userBuffer[mode] ) {
4664 errorType = RtAudioError::MEMORY_ERROR;
4665 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4669 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4670 stream_.callbackInfo.priority = 15;
4672 stream_.callbackInfo.priority = 0;
4674 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4675 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4677 methodResult = SUCCESS;
4681 SAFE_RELEASE( captureDevices );
4682 SAFE_RELEASE( renderDevices );
4683 SAFE_RELEASE( devicePtr );
4684 CoTaskMemFree( deviceFormat );
4686 // if method failed, close the stream
4687 if ( methodResult == FAILURE )
4690 if ( !errorText_.empty() )
4692 return methodResult;
4695 //=============================================================================
4697 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4700 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4705 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4708 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4713 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4716 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4721 //-----------------------------------------------------------------------------
4723 void RtApiWasapi::wasapiThread()
4725 // as this is a new thread, we must CoInitialize it
4726 CoInitialize( NULL );
4730 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4731 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4732 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4733 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4734 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4735 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4737 WAVEFORMATEX* captureFormat = NULL;
4738 WAVEFORMATEX* renderFormat = NULL;
4739 WasapiBuffer captureBuffer;
4740 WasapiBuffer renderBuffer;
4742 // declare local stream variables
4743 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4744 BYTE* streamBuffer = NULL;
4745 unsigned long captureFlags = 0;
4746 unsigned int bufferFrameCount = 0;
4747 unsigned int numFramesPadding = 0;
4748 bool callbackPushed = false;
4749 bool callbackPulled = false;
4750 bool callbackStopped = false;
4751 int callbackResult = 0;
4753 unsigned int deviceBuffSize = 0;
4756 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4758 // Attempt to assign "Pro Audio" characteristic to thread
4759 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4761 DWORD taskIndex = 0;
4762 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4763 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4764 FreeLibrary( AvrtDll );
4767 // start capture stream if applicable
4768 if ( captureAudioClient ) {
4769 hr = captureAudioClient->GetMixFormat( &captureFormat );
4770 if ( FAILED( hr ) ) {
4771 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4775 // initialize capture stream according to desire buffer size
4776 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4778 if ( !captureClient ) {
4779 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4780 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4781 desiredBufferPeriod,
4782 desiredBufferPeriod,
4785 if ( FAILED( hr ) ) {
4786 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4790 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4791 ( void** ) &captureClient );
4792 if ( FAILED( hr ) ) {
4793 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4797 // configure captureEvent to trigger on every available capture buffer
4798 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4799 if ( !captureEvent ) {
4800 errorType = RtAudioError::SYSTEM_ERROR;
4801 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4805 hr = captureAudioClient->SetEventHandle( captureEvent );
4806 if ( FAILED( hr ) ) {
4807 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4811 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4812 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4815 unsigned int inBufferSize = 0;
4816 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4817 if ( FAILED( hr ) ) {
4818 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4822 // scale outBufferSize according to stream->user sample rate ratio
4823 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4824 inBufferSize *= stream_.nDeviceChannels[INPUT];
4826 // set captureBuffer size
4827 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4829 // reset the capture stream
4830 hr = captureAudioClient->Reset();
4831 if ( FAILED( hr ) ) {
4832 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4836 // start the capture stream
4837 hr = captureAudioClient->Start();
4838 if ( FAILED( hr ) ) {
4839 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4844 // start render stream if applicable
4845 if ( renderAudioClient ) {
4846 hr = renderAudioClient->GetMixFormat( &renderFormat );
4847 if ( FAILED( hr ) ) {
4848 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4852 // initialize render stream according to desire buffer size
4853 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4855 if ( !renderClient ) {
4856 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4857 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4858 desiredBufferPeriod,
4859 desiredBufferPeriod,
4862 if ( FAILED( hr ) ) {
4863 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4867 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4868 ( void** ) &renderClient );
4869 if ( FAILED( hr ) ) {
4870 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4874 // configure renderEvent to trigger on every available render buffer
4875 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4876 if ( !renderEvent ) {
4877 errorType = RtAudioError::SYSTEM_ERROR;
4878 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4882 hr = renderAudioClient->SetEventHandle( renderEvent );
4883 if ( FAILED( hr ) ) {
4884 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4888 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4889 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4892 unsigned int outBufferSize = 0;
4893 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4894 if ( FAILED( hr ) ) {
4895 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4899 // scale inBufferSize according to user->stream sample rate ratio
4900 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4901 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4903 // set renderBuffer size
4904 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4906 // reset the render stream
4907 hr = renderAudioClient->Reset();
4908 if ( FAILED( hr ) ) {
4909 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4913 // start the render stream
4914 hr = renderAudioClient->Start();
4915 if ( FAILED( hr ) ) {
4916 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4921 if ( stream_.mode == INPUT ) {
4922 using namespace std; // for roundf
4923 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4925 else if ( stream_.mode == OUTPUT ) {
4926 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4928 else if ( stream_.mode == DUPLEX ) {
4929 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4930 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4933 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4934 if ( !stream_.deviceBuffer ) {
4935 errorType = RtAudioError::MEMORY_ERROR;
4936 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4940 // stream process loop
4941 while ( stream_.state != STREAM_STOPPING ) {
4942 if ( !callbackPulled ) {
4945 // 1. Pull callback buffer from inputBuffer
4946 // 2. If 1. was successful: Convert callback buffer to user format
4948 if ( captureAudioClient ) {
4949 // Pull callback buffer from inputBuffer
4950 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4951 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4952 stream_.deviceFormat[INPUT] );
4954 if ( callbackPulled ) {
4955 if ( stream_.doConvertBuffer[INPUT] ) {
4956 // Convert callback buffer to user format
4957 convertBuffer( stream_.userBuffer[INPUT],
4958 stream_.deviceBuffer,
4959 stream_.convertInfo[INPUT] );
4962 // no further conversion, simple copy deviceBuffer to userBuffer
4963 memcpy( stream_.userBuffer[INPUT],
4964 stream_.deviceBuffer,
4965 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4970 // if there is no capture stream, set callbackPulled flag
4971 callbackPulled = true;
4976 // 1. Execute user callback method
4977 // 2. Handle return value from callback
4979 // if callback has not requested the stream to stop
4980 if ( callbackPulled && !callbackStopped ) {
4981 // Execute user callback method
4982 callbackResult = callback( stream_.userBuffer[OUTPUT],
4983 stream_.userBuffer[INPUT],
4986 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4987 stream_.callbackInfo.userData );
4989 // Handle return value from callback
4990 if ( callbackResult == 1 ) {
4991 // instantiate a thread to stop this thread
4992 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4993 if ( !threadHandle ) {
4994 errorType = RtAudioError::THREAD_ERROR;
4995 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4998 else if ( !CloseHandle( threadHandle ) ) {
4999 errorType = RtAudioError::THREAD_ERROR;
5000 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5004 callbackStopped = true;
5006 else if ( callbackResult == 2 ) {
5007 // instantiate a thread to stop this thread
5008 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5009 if ( !threadHandle ) {
5010 errorType = RtAudioError::THREAD_ERROR;
5011 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5014 else if ( !CloseHandle( threadHandle ) ) {
5015 errorType = RtAudioError::THREAD_ERROR;
5016 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5020 callbackStopped = true;
5027 // 1. Convert callback buffer to stream format
5028 // 2. Push callback buffer into outputBuffer
5030 if ( renderAudioClient && callbackPulled ) {
5031 if ( stream_.doConvertBuffer[OUTPUT] ) {
5032 // Convert callback buffer to stream format
5033 convertBuffer( stream_.deviceBuffer,
5034 stream_.userBuffer[OUTPUT],
5035 stream_.convertInfo[OUTPUT] );
5039 // Push callback buffer into outputBuffer
5040 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
5041 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
5042 stream_.deviceFormat[OUTPUT] );
5045 // if there is no render stream, set callbackPushed flag
5046 callbackPushed = true;
5051 // 1. Get capture buffer from stream
5052 // 2. Push capture buffer into inputBuffer
5053 // 3. If 2. was successful: Release capture buffer
5055 if ( captureAudioClient ) {
5056 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5057 if ( !callbackPulled ) {
5058 WaitForSingleObject( captureEvent, INFINITE );
5061 // Get capture buffer from stream
5062 hr = captureClient->GetBuffer( &streamBuffer,
5064 &captureFlags, NULL, NULL );
5065 if ( FAILED( hr ) ) {
5066 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5070 if ( bufferFrameCount != 0 ) {
5071 // Push capture buffer into inputBuffer
5072 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5073 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5074 stream_.deviceFormat[INPUT] ) )
5076 // Release capture buffer
5077 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5078 if ( FAILED( hr ) ) {
5079 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5085 // Inform WASAPI that capture was unsuccessful
5086 hr = captureClient->ReleaseBuffer( 0 );
5087 if ( FAILED( hr ) ) {
5088 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5095 // Inform WASAPI that capture was unsuccessful
5096 hr = captureClient->ReleaseBuffer( 0 );
5097 if ( FAILED( hr ) ) {
5098 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5106 // 1. Get render buffer from stream
5107 // 2. Pull next buffer from outputBuffer
5108 // 3. If 2. was successful: Fill render buffer with next buffer
5109 // Release render buffer
5111 if ( renderAudioClient ) {
5112 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5113 if ( callbackPulled && !callbackPushed ) {
5114 WaitForSingleObject( renderEvent, INFINITE );
5117 // Get render buffer from stream
5118 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5119 if ( FAILED( hr ) ) {
5120 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5124 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5125 if ( FAILED( hr ) ) {
5126 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5130 bufferFrameCount -= numFramesPadding;
5132 if ( bufferFrameCount != 0 ) {
5133 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5134 if ( FAILED( hr ) ) {
5135 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5139 // Pull next buffer from outputBuffer
5140 // Fill render buffer with next buffer
5141 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5142 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5143 stream_.deviceFormat[OUTPUT] ) )
5145 // Release render buffer
5146 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5147 if ( FAILED( hr ) ) {
5148 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5154 // Inform WASAPI that render was unsuccessful
5155 hr = renderClient->ReleaseBuffer( 0, 0 );
5156 if ( FAILED( hr ) ) {
5157 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5164 // Inform WASAPI that render was unsuccessful
5165 hr = renderClient->ReleaseBuffer( 0, 0 );
5166 if ( FAILED( hr ) ) {
5167 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5173 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5174 if ( callbackPushed ) {
5175 callbackPulled = false;
5177 RtApi::tickStreamTime();
5184 CoTaskMemFree( captureFormat );
5185 CoTaskMemFree( renderFormat );
5189 // update stream state
5190 stream_.state = STREAM_STOPPED;
5192 if ( errorText_.empty() )
5198 //******************** End of __WINDOWS_WASAPI__ *********************//
5202 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5204 // Modified by Robin Davies, October 2005
5205 // - Improvements to DirectX pointer chasing.
5206 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5207 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5208 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5209 // Changed device query structure for RtAudio 4.0.7, January 2010
5211 #include <mmsystem.h>
5215 #include <algorithm>
5217 #if defined(__MINGW32__)
5218 // missing from latest mingw winapi
5219 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5220 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5221 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5222 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5225 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5227 #ifdef _MSC_VER // if Microsoft Visual C++
5228 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5231 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5233 if ( pointer > bufferSize ) pointer -= bufferSize;
5234 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5235 if ( pointer < earlierPointer ) pointer += bufferSize;
5236 return pointer >= earlierPointer && pointer < laterPointer;
// A structure to hold various information related to the DirectSound
// API implementation.
// NOTE(review): the enclosing `struct DsHandle {` header and several
// member declarations (the id[2]/buffer[2]/xrun[2] arrays that the
// constructor below initializes, plus any event/condition handle) are
// not visible in this excerpt.
unsigned int drainCounter; // Tracks callback counts when draining
bool internalDrain;        // Indicates if stop is initiated from callback or not.
// The two-element arrays below are presumably indexed by stream
// direction (0 = playback, 1 = capture) — confirm against callers.
UINT bufferPointer[2];      // current offset within each DS buffer — TODO confirm
DWORD dsBufferSize[2];      // per-direction DS buffer size, presumably in bytes
DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: start in the not-draining state and zero the
// device ids, buffer handles, xrun flags, and buffer offsets.
:drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
// Declarations for utility functions, callbacks, and structures
// specific to the DirectSound implementation.
// Device-enumeration callback handed to DirectSound(Capture)Enumerate
// (used in getDeviceCount below).
// NOTE(review): the remaining parameters and the end of this declaration
// are not visible in this excerpt.
static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
                                          LPCTSTR description,
// Maps a DirectSound error code to a human-readable string for error text.
static const char* getErrorString( int code );
// Audio-thread entry point (unsigned __stdcall signature).
static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice constructor initializer: a fresh device record is unfound,
// with both GUID slots ([0] = output, [1] = input) marked invalid.
// NOTE(review): the enclosing `struct DsDevice` definition is not
// visible in this excerpt.
  : found(false) { validId[0] = false; validId[1] = false; }
// Context passed to deviceQueryCallback while enumerating devices:
// selects which direction is being probed and where results accumulate.
struct DsProbeData {
  bool isInput;                             // true when enumerating capture devices
  std::vector<struct DsDevice>* dsDevices;  // device list owned by RtApiDs
};
5282 RtApiDs :: RtApiDs()
5284 // Dsound will run both-threaded. If CoInitialize fails, then just
5285 // accept whatever the mainline chose for a threading model.
5286 coInitialized_ = false;
5287 HRESULT hr = CoInitialize( NULL );
5288 if ( !FAILED( hr ) ) coInitialized_ = true;
5291 RtApiDs :: ~RtApiDs()
5293 if ( stream_.state != STREAM_CLOSED ) closeStream();
5294 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5297 // The DirectSound default output is always the first device.
5298 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5303 // The DirectSound default input is always the first input device,
5304 // which is the first capture device enumerated.
5305 unsigned int RtApiDs :: getDefaultInputDevice( void )
5310 unsigned int RtApiDs :: getDeviceCount( void )
5312 // Set query flag for previously found devices to false, so that we
5313 // can check for any devices that have disappeared.
5314 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5315 dsDevices[i].found = false;
5317 // Query DirectSound devices.
5318 struct DsProbeData probeInfo;
5319 probeInfo.isInput = false;
5320 probeInfo.dsDevices = &dsDevices;
5321 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5322 if ( FAILED( result ) ) {
5323 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5324 errorText_ = errorStream_.str();
5325 error( RtAudioError::WARNING );
5328 // Query DirectSoundCapture devices.
5329 probeInfo.isInput = true;
5330 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5331 if ( FAILED( result ) ) {
5332 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5333 errorText_ = errorStream_.str();
5334 error( RtAudioError::WARNING );
5337 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5338 for ( unsigned int i=0; i<dsDevices.size(); ) {
5339 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5343 return static_cast<unsigned int>(dsDevices.size());
// Probe DirectSound device `device` and return an RtAudio::DeviceInfo
// describing its output/input channel counts, supported sample rates,
// preferred rate, and native sample formats.  info.probed starts false
// so callers can detect a failed probe.
// NOTE(review): this excerpt elides a number of lines — error-path
// returns, the `probeInput:` label targeted by the goto below, local
// declarations such as `HRESULT result;` and the DSCAPS/DSCCAPS
// capability structs, and some closing braces.  The visible statements
// are kept verbatim.
RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;  // pessimistic default

  if ( dsDevices.size() == 0 ) {
    // Force a query of all devices
    if ( dsDevices.size() == 0 ) {
      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
      error( RtAudioError::INVALID_USE );

  if ( device >= dsDevices.size() ) {
    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // No valid output GUID for this device: skip straight to the capture probe.
  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

  LPDIRECTSOUND output;
  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  outCaps.dwSize = sizeof( outCaps );  // DirectSound requires dwSize set before GetCaps
  result = output->GetCaps( &outCaps );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get output channel information.
  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

  // Get sample rate information.
  info.sampleRates.clear();
  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
      info.sampleRates.push_back( SAMPLE_RATES[k] );

      // Prefer the highest supported rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[k];

  // Get format information.
  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

  if ( getDefaultOutputDevice() == device )
    info.isDefaultOutput = true;

  // Output-only device (no capture GUID): report what we have so far.
  if ( dsDevices[ device ].validId[1] == false ) {
    info.name = dsDevices[ device ].name;

  LPDIRECTSOUNDCAPTURE input;
  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  inCaps.dwSize = sizeof( inCaps );  // same dwSize requirement as above
  result = input->GetCaps( &inCaps );
  if ( FAILED( result ) ) {
    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get input channel information.
  info.inputChannels = inCaps.dwChannels;

  // Get sample rate and format information.
  // dwFormats is a WAVE_FORMAT_* bitmask; mono (xM) and stereo (xS)
  // bits are inspected separately depending on the channel count.
  std::vector<unsigned int> rates;
  if ( inCaps.dwChannels >= 2 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // 16-bit support takes precedence over 8-bit when collecting rates.
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
  else if ( inCaps.dwChannels == 1 ) {
    if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
    if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
    if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

    // Same 16-bit-over-8-bit precedence for the mono format bits.
    if ( info.nativeFormats & RTAUDIO_SINT16 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
    else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
      if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
      if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
      if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
      if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
  else info.inputChannels = 0; // technically, this would be an error

  if ( info.inputChannels == 0 ) return info;

  // Copy the supported rates to the info structure but avoid duplication.
  // NOTE(review): the `bool found = false;` declaration for this loop is
  // not visible in this excerpt.
  for ( unsigned int i=0; i<rates.size(); i++ ) {
    for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
      if ( rates[i] == info.sampleRates[j] ) {
    if ( found == false ) info.sampleRates.push_back( rates[i] );
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Device index 0 is always the default input (first capture device enumerated).
  if ( device == 0 ) info.isDefaultInput = true;

  // Copy name and return.
  info.name = dsDevices[ device ].name;
5525 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5526 unsigned int firstChannel, unsigned int sampleRate,
5527 RtAudioFormat format, unsigned int *bufferSize,
5528 RtAudio::StreamOptions *options )
5530 if ( channels + firstChannel > 2 ) {
5531 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5535 size_t nDevices = dsDevices.size();
5536 if ( nDevices == 0 ) {
5537 // This should not happen because a check is made before this function is called.
5538 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5542 if ( device >= nDevices ) {
5543 // This should not happen because a check is made before this function is called.
5544 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5548 if ( mode == OUTPUT ) {
5549 if ( dsDevices[ device ].validId[0] == false ) {
5550 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5551 errorText_ = errorStream_.str();
5555 else { // mode == INPUT
5556 if ( dsDevices[ device ].validId[1] == false ) {
5557 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5558 errorText_ = errorStream_.str();
5563 // According to a note in PortAudio, using GetDesktopWindow()
5564 // instead of GetForegroundWindow() is supposed to avoid problems
5565 // that occur when the application's window is not the foreground
5566 // window. Also, if the application window closes before the
5567 // DirectSound buffer, DirectSound can crash. In the past, I had
5568 // problems when using GetDesktopWindow() but it seems fine now
5569 // (January 2010). I'll leave it commented here.
5570 // HWND hWnd = GetForegroundWindow();
5571 HWND hWnd = GetDesktopWindow();
5573 // Check the numberOfBuffers parameter and limit the lowest value to
5574 // two. This is a judgement call and a value of two is probably too
5575 // low for capture, but it should work for playback.
5577 if ( options ) nBuffers = options->numberOfBuffers;
5578 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5579 if ( nBuffers < 2 ) nBuffers = 3;
5581 // Check the lower range of the user-specified buffer size and set
5582 // (arbitrarily) to a lower bound of 32.
5583 if ( *bufferSize < 32 ) *bufferSize = 32;
5585 // Create the wave format structure. The data format setting will
5586 // be determined later.
5587 WAVEFORMATEX waveFormat;
5588 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5589 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5590 waveFormat.nChannels = channels + firstChannel;
5591 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5593 // Determine the device buffer size. By default, we'll use the value
5594 // defined above (32K), but we will grow it to make allowances for
5595 // very large software buffer sizes.
5596 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5597 DWORD dsPointerLeadTime = 0;
5599 void *ohandle = 0, *bhandle = 0;
5601 if ( mode == OUTPUT ) {
5603 LPDIRECTSOUND output;
5604 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5605 if ( FAILED( result ) ) {
5606 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5607 errorText_ = errorStream_.str();
5612 outCaps.dwSize = sizeof( outCaps );
5613 result = output->GetCaps( &outCaps );
5614 if ( FAILED( result ) ) {
5616 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5617 errorText_ = errorStream_.str();
5621 // Check channel information.
5622 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5623 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5624 errorText_ = errorStream_.str();
5628 // Check format information. Use 16-bit format unless not
5629 // supported or user requests 8-bit.
5630 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5631 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5632 waveFormat.wBitsPerSample = 16;
5633 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5636 waveFormat.wBitsPerSample = 8;
5637 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5639 stream_.userFormat = format;
5641 // Update wave format structure and buffer information.
5642 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5643 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5644 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5646 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5647 while ( dsPointerLeadTime * 2U > dsBufferSize )
5650 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5651 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5652 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5653 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5654 if ( FAILED( result ) ) {
5656 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5657 errorText_ = errorStream_.str();
5661 // Even though we will write to the secondary buffer, we need to
5662 // access the primary buffer to set the correct output format
5663 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5664 // buffer description.
5665 DSBUFFERDESC bufferDescription;
5666 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5667 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5668 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5670 // Obtain the primary buffer
5671 LPDIRECTSOUNDBUFFER buffer;
5672 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5673 if ( FAILED( result ) ) {
5675 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5676 errorText_ = errorStream_.str();
5680 // Set the primary DS buffer sound format.
5681 result = buffer->SetFormat( &waveFormat );
5682 if ( FAILED( result ) ) {
5684 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5685 errorText_ = errorStream_.str();
5689 // Setup the secondary DS buffer description.
5690 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5691 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5692 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5693 DSBCAPS_GLOBALFOCUS |
5694 DSBCAPS_GETCURRENTPOSITION2 |
5695 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5696 bufferDescription.dwBufferBytes = dsBufferSize;
5697 bufferDescription.lpwfxFormat = &waveFormat;
5699 // Try to create the secondary DS buffer. If that doesn't work,
5700 // try to use software mixing. Otherwise, there's a problem.
5701 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5702 if ( FAILED( result ) ) {
5703 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5704 DSBCAPS_GLOBALFOCUS |
5705 DSBCAPS_GETCURRENTPOSITION2 |
5706 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5707 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5708 if ( FAILED( result ) ) {
5710 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5711 errorText_ = errorStream_.str();
5716 // Get the buffer size ... might be different from what we specified.
5718 dsbcaps.dwSize = sizeof( DSBCAPS );
5719 result = buffer->GetCaps( &dsbcaps );
5720 if ( FAILED( result ) ) {
5723 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5724 errorText_ = errorStream_.str();
5728 dsBufferSize = dsbcaps.dwBufferBytes;
5730 // Lock the DS buffer
5733 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5734 if ( FAILED( result ) ) {
5737 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5738 errorText_ = errorStream_.str();
5742 // Zero the DS buffer
5743 ZeroMemory( audioPtr, dataLen );
5745 // Unlock the DS buffer
5746 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5747 if ( FAILED( result ) ) {
5750 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5751 errorText_ = errorStream_.str();
5755 ohandle = (void *) output;
5756 bhandle = (void *) buffer;
5759 if ( mode == INPUT ) {
5761 LPDIRECTSOUNDCAPTURE input;
5762 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5763 if ( FAILED( result ) ) {
5764 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5765 errorText_ = errorStream_.str();
5770 inCaps.dwSize = sizeof( inCaps );
5771 result = input->GetCaps( &inCaps );
5772 if ( FAILED( result ) ) {
5774 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5775 errorText_ = errorStream_.str();
5779 // Check channel information.
5780 if ( inCaps.dwChannels < channels + firstChannel ) {
5781 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5785 // Check format information. Use 16-bit format unless user
5787 DWORD deviceFormats;
5788 if ( channels + firstChannel == 2 ) {
5789 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5790 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5791 waveFormat.wBitsPerSample = 8;
5792 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5794 else { // assume 16-bit is supported
5795 waveFormat.wBitsPerSample = 16;
5796 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5799 else { // channel == 1
5800 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5801 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5802 waveFormat.wBitsPerSample = 8;
5803 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5805 else { // assume 16-bit is supported
5806 waveFormat.wBitsPerSample = 16;
5807 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5810 stream_.userFormat = format;
5812 // Update wave format structure and buffer information.
5813 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5814 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5815 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5817 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5818 while ( dsPointerLeadTime * 2U > dsBufferSize )
5821 // Setup the secondary DS buffer description.
5822 DSCBUFFERDESC bufferDescription;
5823 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5824 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5825 bufferDescription.dwFlags = 0;
5826 bufferDescription.dwReserved = 0;
5827 bufferDescription.dwBufferBytes = dsBufferSize;
5828 bufferDescription.lpwfxFormat = &waveFormat;
5830 // Create the capture buffer.
5831 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5832 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5833 if ( FAILED( result ) ) {
5835 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5836 errorText_ = errorStream_.str();
5840 // Get the buffer size ... might be different from what we specified.
5842 dscbcaps.dwSize = sizeof( DSCBCAPS );
5843 result = buffer->GetCaps( &dscbcaps );
5844 if ( FAILED( result ) ) {
5847 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5848 errorText_ = errorStream_.str();
5852 dsBufferSize = dscbcaps.dwBufferBytes;
5854 // NOTE: We could have a problem here if this is a duplex stream
5855 // and the play and capture hardware buffer sizes are different
5856 // (I'm actually not sure if that is a problem or not).
5857 // Currently, we are not verifying that.
5859 // Lock the capture buffer
5862 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5863 if ( FAILED( result ) ) {
5866 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5867 errorText_ = errorStream_.str();
5872 ZeroMemory( audioPtr, dataLen );
5874 // Unlock the buffer
5875 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5876 if ( FAILED( result ) ) {
5879 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5880 errorText_ = errorStream_.str();
5884 ohandle = (void *) input;
5885 bhandle = (void *) buffer;
5888 // Set various stream parameters
5889 DsHandle *handle = 0;
5890 stream_.nDeviceChannels[mode] = channels + firstChannel;
5891 stream_.nUserChannels[mode] = channels;
5892 stream_.bufferSize = *bufferSize;
5893 stream_.channelOffset[mode] = firstChannel;
5894 stream_.deviceInterleaved[mode] = true;
5895 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5896 else stream_.userInterleaved = true;
5898 // Set flag for buffer conversion
5899 stream_.doConvertBuffer[mode] = false;
5900 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5901 stream_.doConvertBuffer[mode] = true;
5902 if (stream_.userFormat != stream_.deviceFormat[mode])
5903 stream_.doConvertBuffer[mode] = true;
5904 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5905 stream_.nUserChannels[mode] > 1 )
5906 stream_.doConvertBuffer[mode] = true;
5908 // Allocate necessary internal buffers
5909 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5910 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5911 if ( stream_.userBuffer[mode] == NULL ) {
5912 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5916 if ( stream_.doConvertBuffer[mode] ) {
5918 bool makeBuffer = true;
5919 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5920 if ( mode == INPUT ) {
5921 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5922 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5923 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5928 bufferBytes *= *bufferSize;
5929 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5930 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5931 if ( stream_.deviceBuffer == NULL ) {
5932 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5938 // Allocate our DsHandle structures for the stream.
5939 if ( stream_.apiHandle == 0 ) {
5941 handle = new DsHandle;
5943 catch ( std::bad_alloc& ) {
5944 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5948 // Create a manual-reset event.
5949 handle->condition = CreateEvent( NULL, // no security
5950 TRUE, // manual-reset
5951 FALSE, // non-signaled initially
5953 stream_.apiHandle = (void *) handle;
5956 handle = (DsHandle *) stream_.apiHandle;
5957 handle->id[mode] = ohandle;
5958 handle->buffer[mode] = bhandle;
5959 handle->dsBufferSize[mode] = dsBufferSize;
5960 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5962 stream_.device[mode] = device;
5963 stream_.state = STREAM_STOPPED;
5964 if ( stream_.mode == OUTPUT && mode == INPUT )
5965 // We had already set up an output stream.
5966 stream_.mode = DUPLEX;
5968 stream_.mode = mode;
5969 stream_.nBuffers = nBuffers;
5970 stream_.sampleRate = sampleRate;
5972 // Setup the buffer conversion information structure.
5973 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5975 // Setup the callback thread.
5976 if ( stream_.callbackInfo.isRunning == false ) {
5978 stream_.callbackInfo.isRunning = true;
5979 stream_.callbackInfo.object = (void *) this;
5980 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5981 &stream_.callbackInfo, 0, &threadId );
5982 if ( stream_.callbackInfo.thread == 0 ) {
5983 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5987 // Boost DS thread priority
5988 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5994 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5995 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5996 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5997 if ( buffer ) buffer->Release();
6000 if ( handle->buffer[1] ) {
6001 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6002 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6003 if ( buffer ) buffer->Release();
6006 CloseHandle( handle->condition );
6008 stream_.apiHandle = 0;
6011 for ( int i=0; i<2; i++ ) {
6012 if ( stream_.userBuffer[i] ) {
6013 free( stream_.userBuffer[i] );
6014 stream_.userBuffer[i] = 0;
6018 if ( stream_.deviceBuffer ) {
6019 free( stream_.deviceBuffer );
6020 stream_.deviceBuffer = 0;
6023 stream_.state = STREAM_CLOSED;
// RtApiDs::closeStream — tear down an open DirectSound stream: stop and
// join the callback thread, release the DS playback/capture objects,
// close the signalling event, and free the user/device buffers.
// NOTE(review): this excerpt is fragmentary — braces and the Release()
// calls for the DS objects fall on lines not visible here.
6027 void RtApiDs :: closeStream()
// Non-fatal warning if there is nothing to close.
6029 if ( stream_.state == STREAM_CLOSED ) {
6030 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6031 error( RtAudioError::WARNING );
6035 // Stop the callback thread.
// Clearing isRunning makes callbackHandler's loop exit; then join the
// thread and reclaim its handle.
6036 stream_.callbackInfo.isRunning = false;
6037 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6038 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6040 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release output-side (index 0) DirectSound objects, if present.
6042 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6043 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6044 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release input/capture-side (index 1) DirectSound objects, if present.
6051 if ( handle->buffer[1] ) {
6052 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6053 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the draining event and drop the API-specific handle.
6060 CloseHandle( handle->condition );
6062 stream_.apiHandle = 0;
// Free the per-direction user buffers (calloc'd in probeDeviceOpen).
6065 for ( int i=0; i<2; i++ ) {
6066 if ( stream_.userBuffer[i] ) {
6067 free( stream_.userBuffer[i] );
6068 stream_.userBuffer[i] = 0;
// Free the shared format-conversion buffer.
6072 if ( stream_.deviceBuffer ) {
6073 free( stream_.deviceBuffer );
6074 stream_.deviceBuffer = 0;
// Mark the stream object fully closed.
6077 stream_.mode = UNINITIALIZED;
6078 stream_.state = STREAM_CLOSED;
// RtApiDs::startStream — begin playback and/or capture on a previously
// opened stream.  Starts the DS buffers looping, resets drain state, and
// transitions the stream to STREAM_RUNNING.
// NOTE(review): fragmentary excerpt — some braces/returns are on lines
// not visible here.
6081 void RtApiDs :: startStream()
// Non-fatal warning if the stream is already rolling.
6084 if ( stream_.state == STREAM_RUNNING ) {
6085 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6086 error( RtAudioError::WARNING );
6090 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6092 // Increase scheduler frequency on lesser windows (a side-effect of
6093 // increasing timer accuracy). On greater windows (Win2K or later),
6094 // this is already in effect.
// Paired with timeEndPeriod(1) in stopStream().
6095 timeBeginPeriod( 1 );
// Reset the pointer-synchronization state used by callbackEvent().
6097 buffersRolling = false;
6098 duplexPrerollBytes = 0;
6100 if ( stream_.mode == DUPLEX ) {
6101 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6102 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start the output buffer looping (index 0 = playback).
6106 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6108 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6109 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6110 if ( FAILED( result ) ) {
6111 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6112 errorText_ = errorStream_.str();
// Start the capture buffer looping (index 1 = capture).
6117 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6119 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6120 result = buffer->Start( DSCBSTART_LOOPING );
6121 if ( FAILED( result ) ) {
6122 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6123 errorText_ = errorStream_.str();
// Clear drain state and the manual-reset event before going RUNNING.
6128 handle->drainCounter = 0;
6129 handle->internalDrain = false;
6130 ResetEvent( handle->condition );
6131 stream_.state = STREAM_RUNNING;
// Report any accumulated failure from the Play/Start calls above.
6134 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// RtApiDs::stopStream — drain (if needed) and stop playback/capture,
// zero the DS buffers so a restart doesn't replay stale audio, reset the
// buffer pointers, and restore the scheduler period.
// NOTE(review): fragmentary excerpt — some braces/returns are on lines
// not visible here.
6137 void RtApiDs :: stopStream()
// Non-fatal warning if already stopped.
6140 if ( stream_.state == STREAM_STOPPED ) {
6141 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6142 error( RtAudioError::WARNING );
6149 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// ---- Output side (index 0) ----
6150 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If not already draining, request a drain and wait for callbackEvent()
// to signal completion via the manual-reset event.
6151 if ( handle->drainCounter == 0 ) {
6152 handle->drainCounter = 2;
6153 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6156 stream_.state = STREAM_STOPPED;
// Lock out callbackEvent() while we stop and clear the buffer.
6158 MUTEX_LOCK( &stream_.mutex );
6160 // Stop the buffer and clear memory
6161 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6162 result = buffer->Stop();
6163 if ( FAILED( result ) ) {
6164 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6165 errorText_ = errorStream_.str();
6169 // Lock the buffer and clear it so that if we start to play again,
6170 // we won't have old data playing.
6171 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6172 if ( FAILED( result ) ) {
6173 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6174 errorText_ = errorStream_.str();
6178 // Zero the DS buffer
6179 ZeroMemory( audioPtr, dataLen );
6181 // Unlock the DS buffer
6182 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6183 if ( FAILED( result ) ) {
6184 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6185 errorText_ = errorStream_.str();
6189 // If we start playing again, we must begin at beginning of buffer.
6190 handle->bufferPointer[0] = 0;
// ---- Input side (index 1) ----
6193 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6194 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6198 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken by the output branch
// above, so only lock here for pure INPUT streams.
6200 if ( stream_.mode != DUPLEX )
6201 MUTEX_LOCK( &stream_.mutex );
6203 result = buffer->Stop();
6204 if ( FAILED( result ) ) {
6205 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6206 errorText_ = errorStream_.str();
6210 // Lock the buffer and clear it so that if we start to play again,
6211 // we won't have old data playing.
6212 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6213 if ( FAILED( result ) ) {
6214 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6215 errorText_ = errorStream_.str();
6219 // Zero the DS buffer
6220 ZeroMemory( audioPtr, dataLen );
6222 // Unlock the DS buffer
6223 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6224 if ( FAILED( result ) ) {
6225 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6226 errorText_ = errorStream_.str();
6230 // If we start recording again, we must begin at beginning of buffer.
6231 handle->bufferPointer[1] = 0;
// Restore scheduler granularity (paired with timeBeginPeriod(1) in
// startStream) and release the stream mutex.
6235 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6236 MUTEX_UNLOCK( &stream_.mutex );
// Report any failure recorded in `result` along the way.
6238 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// RtApiDs::abortStream — stop the stream without waiting for queued
// output to drain: setting drainCounter to 2 makes callbackEvent() write
// silence immediately instead of invoking the user callback.
// NOTE(review): fragmentary excerpt — the trailing lines (return /
// delegation to stopStream) are not visible here.
6241 void RtApiDs :: abortStream()
// Non-fatal warning if already stopped.
6244 if ( stream_.state == STREAM_STOPPED ) {
6245 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6246 error( RtAudioError::WARNING );
// Skip the drain phase: drainCounter > 1 short-circuits the user
// callback in callbackEvent().
6250 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6251 handle->drainCounter = 2;
// RtApiDs::callbackEvent — the per-iteration body of the DS callback
// thread.  One call: (1) handles stop/drain bookkeeping, (2) invokes the
// user callback for fresh output data, (3) copies output into the DS
// playback ring buffer ahead of the play cursor, and (4) reads capture
// data from the DS capture ring buffer, with duplex pointer
// synchronization and pre-roll handling.
// NOTE(review): fragmentary excerpt — many braces, `return`s and a few
// statements are on lines not visible here.
6256 void RtApiDs :: callbackEvent()
// While stopped/stopping, idle so the thread doesn't spin.
6258 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6259 Sleep( 50 ); // sleep 50 milliseconds
6263 if ( stream_.state == STREAM_CLOSED ) {
6264 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6265 error( RtAudioError::WARNING );
6269 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6270 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6272 // Check if we were draining the stream and signal is finished.
// After nBuffers+2 silent iterations the DS buffer is fully flushed;
// wake a blocked stopStream() unless the drain was callback-initiated.
6273 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6275 stream_.state = STREAM_STOPPING;
6276 if ( handle->internalDrain == false )
6277 SetEvent( handle->condition );
6283 // Invoke user callback to get fresh output data UNLESS we are
// draining (drainCounter != 0), in which case silence is written below.
6285 if ( handle->drainCounter == 0 ) {
6286 RtAudioCallback callback = (RtAudioCallback) info->callback;
6287 double streamTime = getStreamTime();
6288 RtAudioStreamStatus status = 0;
// Report and clear any under/overflow flagged by a previous iteration.
6289 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6290 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6291 handle->xrun[0] = false;
6293 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6294 status |= RTAUDIO_INPUT_OVERFLOW;
6295 handle->xrun[1] = false;
6297 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6298 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 => abort (no drain); 1 => stop after draining.
6299 if ( cbReturnValue == 2 ) {
6300 stream_.state = STREAM_STOPPING;
6301 handle->drainCounter = 2;
6305 else if ( cbReturnValue == 1 ) {
6306 handle->drainCounter = 1;
6307 handle->internalDrain = true;
// Working variables for the DS ring-buffer cursor arithmetic below.
6312 DWORD currentWritePointer, safeWritePointer;
6313 DWORD currentReadPointer, safeReadPointer;
6314 UINT nextWritePointer;
6316 LPVOID buffer1 = NULL;
6317 LPVOID buffer2 = NULL;
6318 DWORD bufferSize1 = 0;
6319 DWORD bufferSize2 = 0;
// Serialize against stopStream()/closeStream(); bail out if the stream
// was stopped while we were in the user callback.
6324 MUTEX_LOCK( &stream_.mutex );
6325 if ( stream_.state == STREAM_STOPPED ) {
6326 MUTEX_UNLOCK( &stream_.mutex );
// ---- One-time startup synchronization of the DS cursors ----
6330 if ( buffersRolling == false ) {
6331 if ( stream_.mode == DUPLEX ) {
6332 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6334 // It takes a while for the devices to get rolling. As a result,
6335 // there's no guarantee that the capture and write device pointers
6336 // will move in lockstep. Wait here for both devices to start
6337 // rolling, and then set our buffer pointers accordingly.
6338 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6339 // bytes later than the write buffer.
6341 // Stub: a serious risk of having a pre-emptive scheduling round
6342 // take place between the two GetCurrentPosition calls... but I'm
6343 // really not sure how to solve the problem. Temporarily boost to
6344 // Realtime priority, maybe; but I'm not sure what priority the
6345 // DirectSound service threads run at. We *should* be roughly
6346 // within a ms or so of correct.
6348 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6349 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6351 DWORD startSafeWritePointer, startSafeReadPointer;
// Snapshot the starting cursor positions for both devices.
6353 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6354 if ( FAILED( result ) ) {
6355 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6356 errorText_ = errorStream_.str();
6357 MUTEX_UNLOCK( &stream_.mutex );
6358 error( RtAudioError::SYSTEM_ERROR );
6361 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6362 if ( FAILED( result ) ) {
6363 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6364 errorText_ = errorStream_.str();
6365 MUTEX_UNLOCK( &stream_.mutex );
6366 error( RtAudioError::SYSTEM_ERROR );
// Poll (loop header not visible in this excerpt) until both cursors
// have moved off their starting positions.
6370 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6371 if ( FAILED( result ) ) {
6372 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6373 errorText_ = errorStream_.str();
6374 MUTEX_UNLOCK( &stream_.mutex );
6375 error( RtAudioError::SYSTEM_ERROR );
6378 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6379 if ( FAILED( result ) ) {
6380 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6381 errorText_ = errorStream_.str();
6382 MUTEX_UNLOCK( &stream_.mutex );
6383 error( RtAudioError::SYSTEM_ERROR );
6386 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6390 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
// Start writing dsPointerLeadTime ahead of the play cursor; wrap.
6392 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6393 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6394 handle->bufferPointer[1] = safeReadPointer;
6396 else if ( stream_.mode == OUTPUT ) {
6398 // Set the proper nextWritePosition after initial startup.
6399 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// NOTE(review): "¤tWritePointer" is extraction mojibake — the
// sequence "&curr" was eaten by the HTML entity "&curren;" (¤).  The
// call should read GetCurrentPosition( &currentWritePointer, ... ),
// matching the declaration at original line 6312.  Same artifact
// recurs at original lines 6452, 6545 and 6607 below.
6400 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6401 if ( FAILED( result ) ) {
6402 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6403 errorText_ = errorStream_.str();
6404 MUTEX_UNLOCK( &stream_.mutex );
6405 error( RtAudioError::SYSTEM_ERROR );
6408 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6409 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6412 buffersRolling = true;
// ---- Output path: copy user data into the DS playback ring ----
6415 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6417 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6419 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6420 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6421 bufferBytes *= formatBytes( stream_.userFormat );
6422 memset( stream_.userBuffer[0], 0, bufferBytes );
6425 // Setup parameters and do buffer conversion if necessary.
6426 if ( stream_.doConvertBuffer[0] ) {
6427 buffer = stream_.deviceBuffer;
6428 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6429 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6430 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6433 buffer = stream_.userBuffer[0];
6434 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6435 bufferBytes *= formatBytes( stream_.userFormat );
6438 // No byte swapping necessary in DirectSound implementation.
6440 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6441 // unsigned. So, we need to convert our signed 8-bit data here to
6443 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6444 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6446 DWORD dsBufferSize = handle->dsBufferSize[0];
6447 nextWritePointer = handle->bufferPointer[0];
6449 DWORD endWrite, leadPointer;
6451 // Find out where the read and "safe write" pointers are.
// NOTE(review): mojibake — should be &currentWritePointer (see note at
// original line 6400).
6452 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6453 if ( FAILED( result ) ) {
6454 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6455 errorText_ = errorStream_.str();
6456 MUTEX_UNLOCK( &stream_.mutex );
6457 error( RtAudioError::SYSTEM_ERROR );
6461 // We will copy our output buffer into the region between
6462 // safeWritePointer and leadPointer. If leadPointer is not
6463 // beyond the next endWrite position, wait until it is.
6464 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6465 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6466 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6467 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6468 endWrite = nextWritePointer + bufferBytes;
6470 // Check whether the entire write region is behind the play pointer.
6471 if ( leadPointer >= endWrite ) break;
6473 // If we are here, then we must wait until the leadPointer advances
6474 // beyond the end of our next write region. We use the
6475 // Sleep() function to suspend operation until that happens.
6476 double millis = ( endWrite - leadPointer ) * 1000.0;
6477 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6478 if ( millis < 1.0 ) millis = 1.0;
6479 Sleep( (DWORD) millis );
// Underflow check: the target region must not overlap the zone between
// the play and write cursors; if it does, flag an xrun and resync.
6482 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6483 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6484 // We've strayed into the forbidden zone ... resync the read pointer.
6485 handle->xrun[0] = true;
6486 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6487 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6488 handle->bufferPointer[0] = nextWritePointer;
6489 endWrite = nextWritePointer + bufferBytes;
6492 // Lock free space in the buffer
6493 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6494 &bufferSize1, &buffer2, &bufferSize2, 0 );
6495 if ( FAILED( result ) ) {
6496 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6497 errorText_ = errorStream_.str();
6498 MUTEX_UNLOCK( &stream_.mutex );
6499 error( RtAudioError::SYSTEM_ERROR );
6503 // Copy our buffer into the DS buffer
// Two regions because the lock may wrap around the ring boundary.
6504 CopyMemory( buffer1, buffer, bufferSize1 );
6505 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6507 // Update our buffer offset and unlock sound buffer
// NOTE(review): Unlock's return value is not assigned to `result`, so
// the FAILED(result) check below re-tests the earlier Lock result —
// an Unlock failure would go undetected here.
6508 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6509 if ( FAILED( result ) ) {
6510 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6511 errorText_ = errorStream_.str();
6512 MUTEX_UNLOCK( &stream_.mutex );
6513 error( RtAudioError::SYSTEM_ERROR );
6516 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6517 handle->bufferPointer[0] = nextWritePointer;
6520 // Don't bother draining input
6521 if ( handle->drainCounter ) {
6522 handle->drainCounter++;
// ---- Input path: read capture data from the DS capture ring ----
6526 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6528 // Setup parameters.
6529 if ( stream_.doConvertBuffer[1] ) {
6530 buffer = stream_.deviceBuffer;
6531 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6532 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6535 buffer = stream_.userBuffer[1];
6536 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6537 bufferBytes *= formatBytes( stream_.userFormat );
6540 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6541 long nextReadPointer = handle->bufferPointer[1];
6542 DWORD dsBufferSize = handle->dsBufferSize[1];
6544 // Find out where the write and "safe read" pointers are.
// NOTE(review): mojibake — should be &currentReadPointer (see note at
// original line 6400).
6545 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6546 if ( FAILED( result ) ) {
6547 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6548 errorText_ = errorStream_.str();
6549 MUTEX_UNLOCK( &stream_.mutex );
6550 error( RtAudioError::SYSTEM_ERROR );
6554 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6555 DWORD endRead = nextReadPointer + bufferBytes;
6557 // Handling depends on whether we are INPUT or DUPLEX.
6558 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6559 // then a wait here will drag the write pointers into the forbidden zone.
6561 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6562 // it's in a safe position. This causes dropouts, but it seems to be the only
6563 // practical way to sync up the read and write pointers reliably, given the
6564 // the very complex relationship between phase and increment of the read and write
6567 // In order to minimize audible dropouts in DUPLEX mode, we will
6568 // provide a pre-roll period of 0.5 seconds in which we return
6569 // zeros from the read buffer while the pointers sync up.
6571 if ( stream_.mode == DUPLEX ) {
6572 if ( safeReadPointer < endRead ) {
6573 if ( duplexPrerollBytes <= 0 ) {
6574 // Pre-roll time over. Be more agressive.
6575 int adjustment = endRead-safeReadPointer;
6577 handle->xrun[1] = true;
6579 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6580 // and perform fine adjustments later.
6581 // - small adjustments: back off by twice as much.
6582 if ( adjustment >= 2*bufferBytes )
6583 nextReadPointer = safeReadPointer-2*bufferBytes;
6585 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6587 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6591 // In pre=roll time. Just do it.
6592 nextReadPointer = safeReadPointer - bufferBytes;
6593 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6595 endRead = nextReadPointer + bufferBytes;
6598 else { // mode == INPUT
// Pure input: safe to sleep until enough captured data is available.
6599 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6600 // See comments for playback.
6601 double millis = (endRead - safeReadPointer) * 1000.0;
6602 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6603 if ( millis < 1.0 ) millis = 1.0;
6604 Sleep( (DWORD) millis );
6606 // Wake up and find out where we are now.
// NOTE(review): mojibake — should be &currentReadPointer (see note at
// original line 6400).
6607 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6608 if ( FAILED( result ) ) {
6609 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6610 errorText_ = errorStream_.str();
6611 MUTEX_UNLOCK( &stream_.mutex );
6612 error( RtAudioError::SYSTEM_ERROR );
6616 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6620 // Lock free space in the buffer
6621 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6622 &bufferSize1, &buffer2, &bufferSize2, 0 );
6623 if ( FAILED( result ) ) {
6624 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6625 errorText_ = errorStream_.str();
6626 MUTEX_UNLOCK( &stream_.mutex );
6627 error( RtAudioError::SYSTEM_ERROR );
// During duplex pre-roll, hand the user zeros instead of captured data
// while the read/write cursors synchronize.
6631 if ( duplexPrerollBytes <= 0 ) {
6632 // Copy our buffer into the DS buffer
6633 CopyMemory( buffer, buffer1, bufferSize1 );
6634 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6637 memset( buffer, 0, bufferSize1 );
6638 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6639 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6642 // Update our buffer offset and unlock sound buffer
// NOTE(review): as on the output side, Unlock's return value is not
// assigned to `result`; the FAILED check re-tests the Lock result.
6643 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6644 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6645 if ( FAILED( result ) ) {
6646 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6647 errorText_ = errorStream_.str();
6648 MUTEX_UNLOCK( &stream_.mutex );
6649 error( RtAudioError::SYSTEM_ERROR );
6652 handle->bufferPointer[1] = nextReadPointer;
6654 // No byte swapping necessary in DirectSound implementation.
6656 // If necessary, convert 8-bit data from unsigned to signed.
6657 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6658 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6660 // Do buffer conversion if necessary.
6661 if ( stream_.doConvertBuffer[1] )
6662 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// Release the stream lock and advance the stream's time counter.
6666 MUTEX_UNLOCK( &stream_.mutex );
6667 RtApi::tickStreamTime();
6670 // Definitions for utility functions and callbacks
6671 // specific to the DirectSound implementation.
// callbackHandler — _beginthreadex entry point for the DS callback
// thread (started in probeDeviceOpen).  Repeatedly runs callbackEvent()
// until closeStream() clears callbackInfo.isRunning.
// NOTE(review): fragmentary excerpt — the loop-exit/return lines are
// not visible here.
6673 static unsigned __stdcall callbackHandler( void *ptr )
6675 CallbackInfo *info = (CallbackInfo *) ptr;
6676 RtApiDs *object = (RtApiDs *) info->object;
// Cache a pointer to the flag so the loop re-reads it each iteration.
6677 bool* isRunning = &info->isRunning;
6679 while ( *isRunning == true ) {
6680 object->callbackEvent();
// deviceQueryCallback — DirectSoundEnumerate/DirectSoundCaptureEnumerate
// callback.  Probes each enumerated device for usable capabilities and
// records it in the shared dsDevices vector (updating an existing entry
// of the same name, or appending a new DsDevice).  Returning TRUE
// continues the enumeration.
// NOTE(review): fragmentary excerpt — some parameter lines, braces and
// the new-DsDevice construction lines are not visible here.
6687 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6688 LPCTSTR description,
// lpContext carries a DsProbeData: the direction being probed plus the
// device list being filled in.
6692 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6693 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6696 bool validDevice = false;
// Capture-side probe: device is valid if it reports channels+formats.
6697 if ( probeInfo.isInput == true ) {
6699 LPDIRECTSOUNDCAPTURE object;
6701 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
// Skip devices we cannot open; TRUE keeps the enumeration going.
6702 if ( hr != DS_OK ) return TRUE;
6704 caps.dwSize = sizeof(caps);
6705 hr = object->GetCaps( &caps );
6706 if ( hr == DS_OK ) {
6707 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback-side probe: device is valid if it supports a primary
// mono or stereo buffer.
6714 LPDIRECTSOUND object;
6715 hr = DirectSoundCreate( lpguid, &object, NULL );
6716 if ( hr != DS_OK ) return TRUE;
6718 caps.dwSize = sizeof(caps);
6719 hr = object->GetCaps( &caps );
6720 if ( hr == DS_OK ) {
6721 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6727 // If good device, then save its name and guid.
6728 std::string name = convertCharPointerToStdString( description );
6729 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL guid denotes the system default device.
6730 if ( lpguid == NULL )
6731 name = "Default Device";
6732 if ( validDevice ) {
// First look for an existing entry with this name (a device seen by
// the other-direction enumeration) and fill in this direction's id.
// id/validId index 1 = capture, index 0 = playback.
6733 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6734 if ( dsDevices[i].name == name ) {
6735 dsDevices[i].found = true;
6736 if ( probeInfo.isInput ) {
6737 dsDevices[i].id[1] = lpguid;
6738 dsDevices[i].validId[1] = true;
6741 dsDevices[i].id[0] = lpguid;
6742 dsDevices[i].validId[0] = true;
// Otherwise append a fresh DsDevice record (its declaration falls on
// lines not visible here).
6750 device.found = true;
6751 if ( probeInfo.isInput ) {
6752 device.id[1] = lpguid;
6753 device.validId[1] = true;
6756 device.id[0] = lpguid;
6757 device.validId[0] = true;
6759 dsDevices.push_back( device );
// getErrorString — map a DirectSound HRESULT error code to a short
// human-readable string for RtApiDs error messages.
// NOTE(review): fragmentary excerpt — the switch header, a couple of
// case labels and the closing lines are not visible here.
6765 static const char* getErrorString( int code )
6769 case DSERR_ALLOCATED:
6770 return "Already allocated";
6772 case DSERR_CONTROLUNAVAIL:
6773 return "Control unavailable";
6775 case DSERR_INVALIDPARAM:
6776 return "Invalid parameter";
6778 case DSERR_INVALIDCALL:
6779 return "Invalid call";
6782 return "Generic error";
6784 case DSERR_PRIOLEVELNEEDED:
6785 return "Priority level needed";
6787 case DSERR_OUTOFMEMORY:
6788 return "Out of memory";
6790 case DSERR_BADFORMAT:
6791 return "The sample rate or the channel format is not supported";
6793 case DSERR_UNSUPPORTED:
6794 return "Not supported";
6796 case DSERR_NODRIVER:
6799 case DSERR_ALREADYINITIALIZED:
6800 return "Already initialized";
6802 case DSERR_NOAGGREGATION:
6803 return "No aggregation";
6805 case DSERR_BUFFERLOST:
6806 return "Buffer lost";
6808 case DSERR_OTHERAPPHASPRIO:
6809 return "Another application already has priority";
6811 case DSERR_UNINITIALIZED:
6812 return "Uninitialized";
// Fallback for unrecognized codes.
6815 return "DirectSound unknown error";
6818 //******************** End of __WINDOWS_DS__ *********************//
6822 #if defined(__LINUX_ALSA__)
6824 #include <alsa/asoundlib.h>
6827 // A structure to hold various information related to the ALSA API
// NOTE(review): the struct header line appears to be missing from this
// extract; the members below belong to the per-stream AlsaHandle record
// stored in stream_.apiHandle.
// PCM handles: index 0 = playback, index 1 = capture (same convention as
// stream_.device[] / apiInfo->handles[] used throughout the ALSA code).
6830 snd_pcm_t *handles[2];
// Condition variable the callback thread waits on while the stream is
// stopped; signalled by startStream()/closeStream().
6833 pthread_cond_t runnable_cv;
// Constructor initializer: stream starts unlinked (not synchronized),
// not runnable, with no xrun recorded in either direction.
6837 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Entry point for the pthread created in probeDeviceOpen(); ptr is the
// stream's CallbackInfo.
6840 static void *alsaCallbackHandler( void * ptr );
// Default constructor: no per-instance ALSA state to set up here; device
// probing and handle allocation happen lazily in the probe/open methods.
6842 RtApiAlsa :: RtApiAlsa()
6844 // Nothing to do here.
// Destructor: ensure any open stream is closed so its ALSA handles,
// callback thread, and buffers are released before the object is destroyed.
6847 RtApiAlsa :: ~RtApiAlsa()
6849 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of PCM devices visible through ALSA: every
// card/subdevice pair reported via the "hw:N" control interfaces, plus
// the "default" virtual device when it can be opened. Failures while
// probing individual cards are reported as warnings, not fatal errors.
6852 unsigned int RtApiAlsa :: getDeviceCount( void )
6854 unsigned nDevices = 0;
6855 int result, subdevice, card;
6859 // Count cards and devices
// snd_card_next( &card ) with card = -1 yields the first card; it sets
// card to -1 again when no more cards exist, ending the loop.
6861 snd_card_next( &card );
6862 while ( card >= 0 ) {
// Open this card's control interface by its "hw:N" name.
6863 sprintf( name, "hw:%d", card );
6864 result = snd_ctl_open( &handle, name, 0 );
// Control-open failure: warn and move on to the next card.
6866 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6867 errorText_ = errorStream_.str();
6868 error( RtAudioError::WARNING );
// Enumerate the card's PCM devices; ALSA reports the end of the list by
// setting subdevice to a negative value.
6873 result = snd_ctl_pcm_next_device( handle, &subdevice );
6875 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6876 errorText_ = errorStream_.str();
6877 error( RtAudioError::WARNING );
6880 if ( subdevice < 0 )
6885 snd_ctl_close( handle );
6886 snd_card_next( &card );
// Also count the "default" virtual device if the system provides one.
6889 result = snd_ctl_open( &handle, "default", 0 );
6892 snd_ctl_close( handle );
// Probe one ALSA device (identified by the same 0-based index used by
// getDeviceCount()) and fill in an RtAudio::DeviceInfo record: channel
// counts for playback/capture/duplex, supported sample rates, preferred
// rate, native data formats, and the card name. On probe failure a
// WARNING is raised and the partially-filled info (probed == false or
// with whatever was discovered so far) is returned. If a stream is
// already open on this device, the results saved by saveDeviceInfo()
// are returned instead, since an open device cannot be re-probed.
// FIX: the snd_pcm_hw_params_alloca() argument below had been corrupted
// by HTML-entity mojibake ("&params" -> "(pilcrow)ms"); restored to &params.
6898 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6900 RtAudio::DeviceInfo info;
6901 info.probed = false;
6903 unsigned nDevices = 0;
6904 int result, subdevice, card;
6908 // Count cards and devices
// Walk the cards exactly as getDeviceCount() does, stopping when the
// running device index reaches the requested one so that "name" holds
// the matching "hw:card,subdevice" (or "default") identifier.
6911 snd_card_next( &card );
6912 while ( card >= 0 ) {
6913 sprintf( name, "hw:%d", card );
6914 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6916 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6917 errorText_ = errorStream_.str();
6918 error( RtAudioError::WARNING );
6923 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6925 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6926 errorText_ = errorStream_.str();
6927 error( RtAudioError::WARNING );
6930 if ( subdevice < 0 ) break;
6931 if ( nDevices == device ) {
6932 sprintf( name, "hw:%d,%d", card, subdevice );
6938 snd_ctl_close( chandle );
6939 snd_card_next( &card );
// The "default" virtual device occupies the last index.
6942 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6943 if ( result == 0 ) {
6944 if ( nDevices == device ) {
6945 strcpy( name, "default" );
6951 if ( nDevices == 0 ) {
6952 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6953 error( RtAudioError::INVALID_USE );
6957 if ( device >= nDevices ) {
6958 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6959 error( RtAudioError::INVALID_USE );
6965 // If a stream is already open, we cannot probe the stream devices.
6966 // Thus, use the saved results.
6967 if ( stream_.state != STREAM_CLOSED &&
6968 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6969 snd_ctl_close( chandle );
6970 if ( device >= devices_.size() ) {
6971 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6972 error( RtAudioError::WARNING );
6975 return devices_[ device ];
// Allocate probing structures on the stack (alloca-based ALSA macros).
6978 int openMode = SND_PCM_ASYNC;
6979 snd_pcm_stream_t stream;
6980 snd_pcm_info_t *pcminfo;
6981 snd_pcm_info_alloca( &pcminfo );
6983 snd_pcm_hw_params_t *params;
6984 snd_pcm_hw_params_alloca( &params );
6986 // First try for playback unless default device (which has subdev -1)
6987 stream = SND_PCM_STREAM_PLAYBACK;
6988 snd_pcm_info_set_stream( pcminfo, stream );
6989 if ( subdevice != -1 ) {
6990 snd_pcm_info_set_device( pcminfo, subdevice );
6991 snd_pcm_info_set_subdevice( pcminfo, 0 );
6993 result = snd_ctl_pcm_info( chandle, pcminfo );
6995 // Device probably doesn't support playback.
7000 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7002 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7003 errorText_ = errorStream_.str();
7004 error( RtAudioError::WARNING );
7008 // The device is open ... fill the parameter structure.
7009 result = snd_pcm_hw_params_any( phandle, params );
7011 snd_pcm_close( phandle );
7012 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7013 errorText_ = errorStream_.str();
7014 error( RtAudioError::WARNING );
7018 // Get output channel information.
7020 result = snd_pcm_hw_params_get_channels_max( params, &value );
7022 snd_pcm_close( phandle );
7023 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7024 errorText_ = errorStream_.str();
7025 error( RtAudioError::WARNING );
7028 info.outputChannels = value;
7029 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7032 stream = SND_PCM_STREAM_CAPTURE;
7033 snd_pcm_info_set_stream( pcminfo, stream );
7035 // Now try for capture unless default device (with subdev = -1)
7036 if ( subdevice != -1 ) {
7037 result = snd_ctl_pcm_info( chandle, pcminfo );
7038 snd_ctl_close( chandle );
7040 // Device probably doesn't support capture.
7041 if ( info.outputChannels == 0 ) return info;
7042 goto probeParameters;
7046 snd_ctl_close( chandle );
7048 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7050 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7051 errorText_ = errorStream_.str();
7052 error( RtAudioError::WARNING );
7053 if ( info.outputChannels == 0 ) return info;
7054 goto probeParameters;
7057 // The device is open ... fill the parameter structure.
7058 result = snd_pcm_hw_params_any( phandle, params );
7060 snd_pcm_close( phandle );
7061 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7062 errorText_ = errorStream_.str();
7063 error( RtAudioError::WARNING );
7064 if ( info.outputChannels == 0 ) return info;
7065 goto probeParameters;
7068 result = snd_pcm_hw_params_get_channels_max( params, &value );
7070 snd_pcm_close( phandle );
7071 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7072 errorText_ = errorStream_.str();
7073 error( RtAudioError::WARNING );
7074 if ( info.outputChannels == 0 ) return info;
7075 goto probeParameters;
7077 info.inputChannels = value;
7078 snd_pcm_close( phandle );
7080 // If device opens for both playback and capture, we determine the channels.
7081 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7082 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7084 // ALSA doesn't provide default devices so we'll use the first available one.
7085 if ( device == 0 && info.outputChannels > 0 )
7086 info.isDefaultOutput = true;
7087 if ( device == 0 && info.inputChannels > 0 )
7088 info.isDefaultInput = true;
7091 // At this point, we just need to figure out the supported data
7092 // formats and sample rates. We'll proceed by opening the device in
7093 // the direction with the maximum number of channels, or playback if
7094 // they are equal. This might limit our sample rate options, but so
7097 if ( info.outputChannels >= info.inputChannels )
7098 stream = SND_PCM_STREAM_PLAYBACK;
7100 stream = SND_PCM_STREAM_CAPTURE;
7101 snd_pcm_info_set_stream( pcminfo, stream );
7103 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7105 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7106 errorText_ = errorStream_.str();
7107 error( RtAudioError::WARNING );
7111 // The device is open ... fill the parameter structure.
7112 result = snd_pcm_hw_params_any( phandle, params );
7114 snd_pcm_close( phandle );
7115 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7116 errorText_ = errorStream_.str();
7117 error( RtAudioError::WARNING );
7121 // Test our discrete set of sample rate values.
7122 info.sampleRates.clear();
7123 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7124 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7125 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate that is <= 48000 Hz, or the
// first supported rate if none qualifies.
7127 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7128 info.preferredSampleRate = SAMPLE_RATES[i];
7131 if ( info.sampleRates.size() == 0 ) {
7132 snd_pcm_close( phandle );
7133 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7134 errorText_ = errorStream_.str();
7135 error( RtAudioError::WARNING );
7139 // Probe the supported data formats ... we don't care about endian-ness just yet
7140 snd_pcm_format_t format;
7141 info.nativeFormats = 0;
7142 format = SND_PCM_FORMAT_S8;
7143 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7144 info.nativeFormats |= RTAUDIO_SINT8;
7145 format = SND_PCM_FORMAT_S16;
7146 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7147 info.nativeFormats |= RTAUDIO_SINT16;
7148 format = SND_PCM_FORMAT_S24;
7149 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7150 info.nativeFormats |= RTAUDIO_SINT24;
7151 format = SND_PCM_FORMAT_S32;
7152 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7153 info.nativeFormats |= RTAUDIO_SINT32;
7154 format = SND_PCM_FORMAT_FLOAT;
7155 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7156 info.nativeFormats |= RTAUDIO_FLOAT32;
7157 format = SND_PCM_FORMAT_FLOAT64;
7158 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7159 info.nativeFormats |= RTAUDIO_FLOAT64;
7161 // Check that we have at least one supported format
7162 if ( info.nativeFormats == 0 ) {
7163 snd_pcm_close( phandle );
7164 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7165 errorText_ = errorStream_.str();
7166 error( RtAudioError::WARNING );
7170 // Get the device name
7172 result = snd_card_get_name( card, &cardname );
7173 if ( result >= 0 ) {
7174 sprintf( name, "hw:%s,%d", cardname, subdevice );
7179 // That's all ... close the device and return
7180 snd_pcm_close( phandle );
// Snapshot every device's DeviceInfo into devices_. Called from
// probeDeviceOpen() before a device is opened, because getDeviceInfo()
// cannot re-probe a device that is already open and instead serves
// results from this cache.
7185 void RtApiAlsa :: saveDeviceInfo( void )
7189 unsigned int nDevices = getDeviceCount();
7190 devices_.resize( nDevices );
7191 for ( unsigned int i=0; i<nDevices; i++ )
7192 devices_[i] = getDeviceInfo( i );
// Open one direction (OUTPUT or INPUT) of an ALSA stream on the given
// device and configure it: access mode (interleaved or not), sample
// format (with fallback negotiation), rate, channel count, period size
// and count, and software params (silence-fill / no auto-stop on xrun).
// Allocates the per-stream AlsaHandle and conversion buffers, links
// playback+capture handles for duplex when possible, and spawns the
// callback thread (realtime SCHED_RR when requested via options).
// Returns true on success; on failure the pcm handle and any buffers
// allocated so far are released and errorText_ is set.
// FIX: the pthread_attr_setschedparam() argument below had been corrupted
// by HTML-entity mojibake ("&param" -> "(pilcrow)m"); restored to &param.
7195 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7196 unsigned int firstChannel, unsigned int sampleRate,
7197 RtAudioFormat format, unsigned int *bufferSize,
7198 RtAudio::StreamOptions *options )
7201 #if defined(__RTAUDIO_DEBUG__)
7203 snd_output_stdio_attach(&out, stderr, 0);
7206 // I'm not using the "plug" interface ... too much inconsistent behavior.
7208 unsigned nDevices = 0;
7209 int result, subdevice, card;
// Honor an explicit request to use the "default" virtual device.
7213 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7214 snprintf(name, sizeof(name), "%s", "default");
7216 // Count cards and devices
// Resolve the 0-based device index to its "hw:card,subdevice" name,
// mirroring the enumeration order used by getDeviceCount().
7218 snd_card_next( &card );
7219 while ( card >= 0 ) {
7220 sprintf( name, "hw:%d", card );
7221 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7223 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7224 errorText_ = errorStream_.str();
7229 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7230 if ( result < 0 ) break;
7231 if ( subdevice < 0 ) break;
7232 if ( nDevices == device ) {
7233 sprintf( name, "hw:%d,%d", card, subdevice );
7234 snd_ctl_close( chandle );
7239 snd_ctl_close( chandle );
7240 snd_card_next( &card );
7243 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7244 if ( result == 0 ) {
7245 if ( nDevices == device ) {
7246 strcpy( name, "default" );
7252 if ( nDevices == 0 ) {
7253 // This should not happen because a check is made before this function is called.
7254 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7258 if ( device >= nDevices ) {
7259 // This should not happen because a check is made before this function is called.
7260 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7267 // The getDeviceInfo() function will not work for a device that is
7268 // already open. Thus, we'll probe the system before opening a
7269 // stream and save the results for use by getDeviceInfo().
7270 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7271 this->saveDeviceInfo();
7273 snd_pcm_stream_t stream;
7274 if ( mode == OUTPUT )
7275 stream = SND_PCM_STREAM_PLAYBACK;
7277 stream = SND_PCM_STREAM_CAPTURE;
// Open the pcm handle (blocking mode here, unlike the probe functions).
7280 int openMode = SND_PCM_ASYNC;
7281 result = snd_pcm_open( &phandle, name, stream, openMode );
7283 if ( mode == OUTPUT )
7284 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7286 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7287 errorText_ = errorStream_.str();
7291 // Fill the parameter structure.
7292 snd_pcm_hw_params_t *hw_params;
7293 snd_pcm_hw_params_alloca( &hw_params );
7294 result = snd_pcm_hw_params_any( phandle, hw_params );
7296 snd_pcm_close( phandle );
7297 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7298 errorText_ = errorStream_.str();
7302 #if defined(__RTAUDIO_DEBUG__)
7303 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7304 snd_pcm_hw_params_dump( hw_params, out );
7307 // Set access ... check user preference.
// Try the user's preferred layout first, falling back to the other one
// and recording what the device actually accepted in deviceInterleaved[].
7308 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7309 stream_.userInterleaved = false;
7310 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7312 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7313 stream_.deviceInterleaved[mode] = true;
7316 stream_.deviceInterleaved[mode] = false;
7319 stream_.userInterleaved = true;
7320 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7322 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7323 stream_.deviceInterleaved[mode] = false;
7326 stream_.deviceInterleaved[mode] = true;
7330 snd_pcm_close( phandle );
7331 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7332 errorText_ = errorStream_.str();
7336 // Determine how to set the device format.
7337 stream_.userFormat = format;
7338 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7340 if ( format == RTAUDIO_SINT8 )
7341 deviceFormat = SND_PCM_FORMAT_S8;
7342 else if ( format == RTAUDIO_SINT16 )
7343 deviceFormat = SND_PCM_FORMAT_S16;
7344 else if ( format == RTAUDIO_SINT24 )
7345 deviceFormat = SND_PCM_FORMAT_S24;
7346 else if ( format == RTAUDIO_SINT32 )
7347 deviceFormat = SND_PCM_FORMAT_S32;
7348 else if ( format == RTAUDIO_FLOAT32 )
7349 deviceFormat = SND_PCM_FORMAT_FLOAT;
7350 else if ( format == RTAUDIO_FLOAT64 )
7351 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7353 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7354 stream_.deviceFormat[mode] = format;
7358 // The user requested format is not natively supported by the device.
// Fallback negotiation, widest format first; buffer conversion is set up
// later when device format != user format.
7359 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7360 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7361 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7365 deviceFormat = SND_PCM_FORMAT_FLOAT;
7366 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7367 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7371 deviceFormat = SND_PCM_FORMAT_S32;
7372 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7373 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7377 deviceFormat = SND_PCM_FORMAT_S24;
7378 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7379 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7383 deviceFormat = SND_PCM_FORMAT_S16;
7384 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7385 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7389 deviceFormat = SND_PCM_FORMAT_S8;
7390 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7391 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7395 // If we get here, no supported format was found.
7396 snd_pcm_close( phandle );
7397 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7398 errorText_ = errorStream_.str();
7402 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7404 snd_pcm_close( phandle );
7405 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7406 errorText_ = errorStream_.str();
7410 // Determine whether byte-swaping is necessary.
7411 stream_.doByteSwap[mode] = false;
7412 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7413 result = snd_pcm_format_cpu_endian( deviceFormat );
7415 stream_.doByteSwap[mode] = true;
7416 else if (result < 0) {
7417 snd_pcm_close( phandle );
7418 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7419 errorText_ = errorStream_.str();
7424 // Set the sample rate.
7425 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7427 snd_pcm_close( phandle );
7428 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7429 errorText_ = errorStream_.str();
7433 // Determine the number of channels for this device. We support a possible
7434 // minimum device channel number > than the value requested by the user.
7435 stream_.nUserChannels[mode] = channels;
7437 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7438 unsigned int deviceChannels = value;
7439 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7440 snd_pcm_close( phandle );
7441 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7442 errorText_ = errorStream_.str();
7446 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7448 snd_pcm_close( phandle );
7449 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7450 errorText_ = errorStream_.str();
7453 deviceChannels = value;
7454 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7455 stream_.nDeviceChannels[mode] = deviceChannels;
7457 // Set the device channels.
7458 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7460 snd_pcm_close( phandle );
7461 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7462 errorText_ = errorStream_.str();
7466 // Set the buffer (or period) size.
7468 snd_pcm_uframes_t periodSize = *bufferSize;
7469 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7471 snd_pcm_close( phandle );
7472 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7473 errorText_ = errorStream_.str();
// Report back the period size the hardware actually granted.
7476 *bufferSize = periodSize;
7478 // Set the buffer number, which in ALSA is referred to as the "period".
7479 unsigned int periods = 0;
7480 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7481 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7482 if ( periods < 2 ) periods = 4; // a fairly safe default value
7483 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7485 snd_pcm_close( phandle );
7486 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7487 errorText_ = errorStream_.str();
7491 // If attempting to setup a duplex stream, the bufferSize parameter
7492 // MUST be the same in both directions!
7493 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7494 snd_pcm_close( phandle );
7495 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7496 errorText_ = errorStream_.str();
7500 stream_.bufferSize = *bufferSize;
7502 // Install the hardware configuration
7503 result = snd_pcm_hw_params( phandle, hw_params );
7505 snd_pcm_close( phandle );
7506 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7507 errorText_ = errorStream_.str();
7511 #if defined(__RTAUDIO_DEBUG__)
7512 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7513 snd_pcm_hw_params_dump( hw_params, out );
7516 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7517 snd_pcm_sw_params_t *sw_params = NULL;
7518 snd_pcm_sw_params_alloca( &sw_params );
7519 snd_pcm_sw_params_current( phandle, sw_params );
7520 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7521 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7522 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7524 // The following two settings were suggested by Theo Veenker
7525 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7526 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7528 // here are two options for a fix
7529 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7530 snd_pcm_uframes_t val;
7531 snd_pcm_sw_params_get_boundary( sw_params, &val );
7532 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7534 result = snd_pcm_sw_params( phandle, sw_params );
7536 snd_pcm_close( phandle );
7537 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7538 errorText_ = errorStream_.str();
7542 #if defined(__RTAUDIO_DEBUG__)
7543 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7544 snd_pcm_sw_params_dump( sw_params, out );
7547 // Set flags for buffer conversion
7548 stream_.doConvertBuffer[mode] = false;
7549 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7550 stream_.doConvertBuffer[mode] = true;
7551 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7552 stream_.doConvertBuffer[mode] = true;
7553 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7554 stream_.nUserChannels[mode] > 1 )
7555 stream_.doConvertBuffer[mode] = true;
7557 // Allocate the ApiHandle if necessary and then save.
7558 AlsaHandle *apiInfo = 0;
7559 if ( stream_.apiHandle == 0 ) {
7561 apiInfo = (AlsaHandle *) new AlsaHandle;
7563 catch ( std::bad_alloc& ) {
7564 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7568 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7569 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7573 stream_.apiHandle = (void *) apiInfo;
7574 apiInfo->handles[0] = 0;
7575 apiInfo->handles[1] = 0;
7578 apiInfo = (AlsaHandle *) stream_.apiHandle;
7580 apiInfo->handles[mode] = phandle;
7583 // Allocate necessary internal buffers.
7584 unsigned long bufferBytes;
7585 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7586 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7587 if ( stream_.userBuffer[mode] == NULL ) {
7588 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7592 if ( stream_.doConvertBuffer[mode] ) {
7594 bool makeBuffer = true;
7595 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7596 if ( mode == INPUT ) {
// For duplex, reuse the output-side device buffer if it is big enough.
7597 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7598 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7599 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7604 bufferBytes *= *bufferSize;
7605 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7606 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7607 if ( stream_.deviceBuffer == NULL ) {
7608 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7614 stream_.sampleRate = sampleRate;
7615 stream_.nBuffers = periods;
7616 stream_.device[mode] = device;
7617 stream_.state = STREAM_STOPPED;
7619 // Setup the buffer conversion information structure.
7620 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7622 // Setup thread if necessary.
7623 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7624 // We had already set up an output stream.
7625 stream_.mode = DUPLEX;
7626 // Link the streams if possible.
7627 apiInfo->synchronized = false;
7628 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7629 apiInfo->synchronized = true;
7631 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7632 error( RtAudioError::WARNING );
7636 stream_.mode = mode;
7638 // Setup callback thread.
7639 stream_.callbackInfo.object = (void *) this;
7641 // Set the thread attributes for joinable and realtime scheduling
7642 // priority (optional). The higher priority will only take affect
7643 // if the program is run as root or suid. Note, under Linux
7644 // processes with CAP_SYS_NICE privilege, a user can change
7645 // scheduling policy and priority (thus need not be root). See
7646 // POSIX "capabilities".
7647 pthread_attr_t attr;
7648 pthread_attr_init( &attr );
7649 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7650 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7651 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7652 stream_.callbackInfo.doRealtime = true;
7653 struct sched_param param;
7654 int priority = options->priority;
7655 int min = sched_get_priority_min( SCHED_RR );
7656 int max = sched_get_priority_max( SCHED_RR );
7657 if ( priority < min ) priority = min;
7658 else if ( priority > max ) priority = max;
7659 param.sched_priority = priority;
7661 // Set the policy BEFORE the priority. Otherwise it fails.
7662 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7663 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7664 // This is definitely required. Otherwise it fails.
7665 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7666 pthread_attr_setschedparam(&attr, &param);
7669 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7671 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7674 stream_.callbackInfo.isRunning = true;
7675 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7676 pthread_attr_destroy( &attr );
7678 // Failed. Try instead with default attributes.
7679 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7681 stream_.callbackInfo.isRunning = false;
7682 errorText_ = "RtApiAlsa::error creating callback thread!";
// Error cleanup path: tear down everything allocated above.
7692 pthread_cond_destroy( &apiInfo->runnable_cv );
7693 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7694 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7696 stream_.apiHandle = 0;
7699 if ( phandle) snd_pcm_close( phandle );
7701 for ( int i=0; i<2; i++ ) {
7702 if ( stream_.userBuffer[i] ) {
7703 free( stream_.userBuffer[i] );
7704 stream_.userBuffer[i] = 0;
7708 if ( stream_.deviceBuffer ) {
7709 free( stream_.deviceBuffer );
7710 stream_.deviceBuffer = 0;
7713 stream_.state = STREAM_CLOSED;
// Close the open stream: stop the callback thread, drop any active pcm
// transfers, close the ALSA handles, and free the AlsaHandle and all
// user/device buffers. Resets stream_ to UNINITIALIZED / STREAM_CLOSED.
7717 void RtApiAlsa :: closeStream()
7719 if ( stream_.state == STREAM_CLOSED ) {
7720 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7721 error( RtAudioError::WARNING );
7725 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback thread to exit its loop ...
7726 stream_.callbackInfo.isRunning = false;
7727 MUTEX_LOCK( &stream_.mutex );
// ... and wake it if it is parked on the runnable condition variable.
7728 if ( stream_.state == STREAM_STOPPED ) {
7729 apiInfo->runnable = true;
7730 pthread_cond_signal( &apiInfo->runnable_cv );
7732 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing down handles.
7733 pthread_join( stream_.callbackInfo.thread, NULL );
7735 if ( stream_.state == STREAM_RUNNING ) {
7736 stream_.state = STREAM_STOPPED;
// Abort any in-flight transfers (handles[0] = playback, [1] = capture).
7737 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7738 snd_pcm_drop( apiInfo->handles[0] );
7739 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7740 snd_pcm_drop( apiInfo->handles[1] );
// Release the per-stream AlsaHandle state.
7744 pthread_cond_destroy( &apiInfo->runnable_cv );
7745 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7746 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7748 stream_.apiHandle = 0;
// Free the interleave/format conversion buffers.
7751 for ( int i=0; i<2; i++ ) {
7752 if ( stream_.userBuffer[i] ) {
7753 free( stream_.userBuffer[i] );
7754 stream_.userBuffer[i] = 0;
7758 if ( stream_.deviceBuffer ) {
7759 free( stream_.deviceBuffer );
7760 stream_.deviceBuffer = 0;
7763 stream_.mode = UNINITIALIZED;
7764 stream_.state = STREAM_CLOSED;
// Start a stopped stream: prepare the pcm handle(s) as needed, mark the
// stream RUNNING, and wake the callback thread via the runnable_cv.
// Raises a WARNING if already running, SYSTEM_ERROR if preparation fails.
7767 void RtApiAlsa :: startStream()
7769 // This method calls snd_pcm_prepare if the device isn't already in that state.
7772 if ( stream_.state == STREAM_RUNNING ) {
7773 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7774 error( RtAudioError::WARNING );
7778 MUTEX_LOCK( &stream_.mutex );
7781 snd_pcm_state_t state;
7782 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7783 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback handle (index 0) if not already prepared.
7784 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7785 state = snd_pcm_state( handle[0] );
7786 if ( state != SND_PCM_STATE_PREPARED ) {
7787 result = snd_pcm_prepare( handle[0] );
7789 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7790 errorText_ = errorStream_.str();
// Prepare the capture handle (index 1) unless it is linked to playback,
// in which case starting playback drives it too.
7796 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7797 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7798 state = snd_pcm_state( handle[1] );
7799 if ( state != SND_PCM_STATE_PREPARED ) {
7800 result = snd_pcm_prepare( handle[1] );
7802 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7803 errorText_ = errorStream_.str();
7809 stream_.state = STREAM_RUNNING;
// Release the callback thread waiting in callbackEvent().
7812 apiInfo->runnable = true;
7813 pthread_cond_signal( &apiInfo->runnable_cv );
7814 MUTEX_UNLOCK( &stream_.mutex );
7816 if ( result >= 0 ) return;
7817 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream gracefully. Playback is drained (pending audio
// is played out) unless the handles are linked, in which case it is
// dropped; unlinked capture is always dropped. The callback thread is
// left parked on runnable_cv until the next startStream().
7820 void RtApiAlsa :: stopStream()
7823 if ( stream_.state == STREAM_STOPPED ) {
7824 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7825 error( RtAudioError::WARNING );
// Flip the state before taking the mutex so the callback loop sees it.
7829 stream_.state = STREAM_STOPPED;
7830 MUTEX_LOCK( &stream_.mutex );
7833 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7834 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7835 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked handles: drop immediately; otherwise drain pending playback.
7836 if ( apiInfo->synchronized )
7837 result = snd_pcm_drop( handle[0] );
7839 result = snd_pcm_drain( handle[0] );
7841 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7842 errorText_ = errorStream_.str();
7847 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7848 result = snd_pcm_drop( handle[1] );
7850 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7851 errorText_ = errorStream_.str();
7857 apiInfo->runnable = false; // fixes high CPU usage when stopped
7858 MUTEX_UNLOCK( &stream_.mutex );
7860 if ( result >= 0 ) return;
7861 error( RtAudioError::SYSTEM_ERROR );
// Stop the ALSA stream immediately: snd_pcm_drop() both pcm handles
// (discarding queued samples, unlike stopStream's drain) and park the
// callback thread via apiInfo->runnable = false.
// NOTE(review): interior lines are missing from this excerpt; code text
// below is left byte-identical.
7864 void RtApiAlsa :: abortStream()
7867 if ( stream_.state == STREAM_STOPPED ) {
7868 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7869 error( RtAudioError::WARNING );
7873 stream_.state = STREAM_STOPPED;
7874 MUTEX_LOCK( &stream_.mutex );
7877 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7878 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Drop output without draining — samples already queued are thrown away.
7879 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7880 result = snd_pcm_drop( handle[0] );
7882 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7883 errorText_ = errorStream_.str();
// Drop input only when it is an independent (non-synchronized) device.
7888 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7889 result = snd_pcm_drop( handle[1] );
7891 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7892 errorText_ = errorStream_.str();
7898 apiInfo->runnable = false; // fixes high CPU usage when stopped
7899 MUTEX_UNLOCK( &stream_.mutex );
7901 if ( result >= 0 ) return;
7902 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then read captured frames from / write rendered frames to
// the pcm devices (interleaved or per-channel), handling xrun (-EPIPE)
// recovery with snd_pcm_prepare and updating stream latency.
// NOTE(review): interior lines (braces, else branches, goto labels such as
// "unlock") are missing from this excerpt; code text is left byte-identical.
7905 void RtApiAlsa :: callbackEvent()
7907 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While the stream is stopped, block on runnable_cv until startStream()
// signals; re-check the state after waking since it may have changed.
7908 if ( stream_.state == STREAM_STOPPED ) {
7909 MUTEX_LOCK( &stream_.mutex );
7910 while ( !apiInfo->runnable )
7911 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7913 if ( stream_.state != STREAM_RUNNING ) {
7914 MUTEX_UNLOCK( &stream_.mutex );
7917 MUTEX_UNLOCK( &stream_.mutex );
7920 if ( stream_.state == STREAM_CLOSED ) {
7921 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7922 error( RtAudioError::WARNING );
// Report any xrun flags set by the device as status bits to the callback,
// then clear them.
7926 int doStopStream = 0;
7927 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7928 double streamTime = getStreamTime();
7929 RtAudioStreamStatus status = 0;
7930 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7931 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7932 apiInfo->xrun[0] = false;
7934 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7935 status |= RTAUDIO_INPUT_OVERFLOW;
7936 apiInfo->xrun[1] = false;
// User callback: return value 1 requests stopStream, 2 requests abort.
7938 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7939 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
7941 if ( doStopStream == 2 ) {
7946 MUTEX_LOCK( &stream_.mutex );
7948 // The state might change while waiting on a mutex.
7949 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7955 snd_pcm_sframes_t frames;
7956 RtAudioFormat format;
7957 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture path (index 1 = input) ----
7959 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7961 // Setup parameters.
7962 if ( stream_.doConvertBuffer[1] ) {
7963 buffer = stream_.deviceBuffer;
7964 channels = stream_.nDeviceChannels[1];
7965 format = stream_.deviceFormat[1];
7968 buffer = stream_.userBuffer[1];
7969 channels = stream_.nUserChannels[1];
7970 format = stream_.userFormat;
7973 // Read samples from device in interleaved/non-interleaved format.
7974 if ( stream_.deviceInterleaved[1] )
7975 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
7977 void *bufs[channels];
7978 size_t offset = stream_.bufferSize * formatBytes( format );
7979 for ( int i=0; i<channels; i++ )
7980 bufs[i] = (void *) (buffer + (i * offset));
7981 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// A short read means either an error or an overrun; -EPIPE in the XRUN
// state is recovered by re-preparing the device.
7984 if ( result < (int) stream_.bufferSize ) {
7985 // Either an error or overrun occured.
7986 if ( result == -EPIPE ) {
7987 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7988 if ( state == SND_PCM_STATE_XRUN ) {
7989 apiInfo->xrun[1] = true;
7990 result = snd_pcm_prepare( handle[1] );
7992 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7993 errorText_ = errorStream_.str();
7997 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7998 errorText_ = errorStream_.str();
8002 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8003 errorText_ = errorStream_.str();
8005 error( RtAudioError::WARNING );
8009 // Do byte swapping if necessary.
8010 if ( stream_.doByteSwap[1] )
8011 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8013 // Do buffer conversion if necessary.
8014 if ( stream_.doConvertBuffer[1] )
8015 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8017 // Check stream latency
8018 result = snd_pcm_delay( handle[1], &frames );
8019 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback path (index 0 = output) ----
8024 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8026 // Setup parameters and do buffer conversion if necessary.
8027 if ( stream_.doConvertBuffer[0] ) {
8028 buffer = stream_.deviceBuffer;
8029 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8030 channels = stream_.nDeviceChannels[0];
8031 format = stream_.deviceFormat[0];
8034 buffer = stream_.userBuffer[0];
8035 channels = stream_.nUserChannels[0];
8036 format = stream_.userFormat;
8039 // Do byte swapping if necessary.
8040 if ( stream_.doByteSwap[0] )
8041 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8043 // Write samples to device in interleaved/non-interleaved format.
8044 if ( stream_.deviceInterleaved[0] )
8045 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8047 void *bufs[channels];
8048 size_t offset = stream_.bufferSize * formatBytes( format );
8049 for ( int i=0; i<channels; i++ )
8050 bufs[i] = (void *) (buffer + (i * offset));
8051 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write: -EPIPE/XRUN is an underrun, recovered with snd_pcm_prepare.
8054 if ( result < (int) stream_.bufferSize ) {
8055 // Either an error or underrun occured.
8056 if ( result == -EPIPE ) {
8057 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8058 if ( state == SND_PCM_STATE_XRUN ) {
8059 apiInfo->xrun[0] = true;
8060 result = snd_pcm_prepare( handle[0] );
8062 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8063 errorText_ = errorStream_.str();
8066 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8069 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8070 errorText_ = errorStream_.str();
8074 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8075 errorText_ = errorStream_.str();
8077 error( RtAudioError::WARNING );
8081 // Check stream latency
8082 result = snd_pcm_delay( handle[0], &frames );
8083 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8087 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock and honor a deferred stop request (return 1).
8089 RtApi::tickStreamTime();
8090 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: pumps callbackEvent()
// until CallbackInfo::isRunning is cleared, with a cancellation point each
// iteration. Logs whether SCHED_RR realtime scheduling actually took effect.
8093 static void *alsaCallbackHandler( void *ptr )
8095 CallbackInfo *info = (CallbackInfo *) ptr;
8096 RtApiAlsa *object = (RtApiAlsa *) info->object;
8097 bool *isRunning = &info->isRunning;
8099 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8100 if ( info->doRealtime ) {
8101 std::cerr << "RtAudio alsa: " <<
8102 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8103 "running realtime scheduling" << std::endl;
// Main loop: pthread_testcancel() makes each pass a cancellation point.
8107 while ( *isRunning == true ) {
8108 pthread_testcancel();
8109 object->callbackEvent();
8112 pthread_exit( NULL );
8115 //******************** End of __LINUX_ALSA__ *********************//
8118 #if defined(__LINUX_PULSE__)
8120 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8121 // and Tristan Matthews.
8123 #include <pulse/error.h>
8124 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; zero-terminated so callers
// can iterate with `for ( const unsigned int *sr = ...; *sr; ++sr )`.
8127 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8128 44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent; used by the
// supported_sampleformats table below.
8130 struct rtaudio_pa_format_mapping_t {
8131 RtAudioFormat rtaudio_format;
8132 pa_sample_format_t pa_format;
// Format-mapping table, terminated by the {0, PA_SAMPLE_INVALID} sentinel.
// Only S16LE, S32LE and FLOAT32LE map natively; other RtAudio formats are
// converted (see probeDeviceOpen, which falls back to PA_SAMPLE_FLOAT32LE).
8135 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8136 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8137 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8138 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8139 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: playback/record connections
// (s_play/s_rec, null when unused), the callback thread's condition variable,
// and the runnable flag it waits on.
// NOTE(review): member declarations between these lines (e.g. s_play, s_rec,
// thread, runnable) are missing from this excerpt.
8141 struct PulseAudioHandle {
8145 pthread_cond_t runnable_cv;
8147 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: close the stream if it was left open so the PulseAudio
// connections and callback thread are torn down.
8150 RtApiPulse::~RtApiPulse()
8152 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend.
// NOTE(review): the function body is missing from this excerpt; getDeviceInfo
// below ignores its device argument, which suggests a single virtual device —
// confirm against the full source.
8161 unsigned int RtApiPulse::getDeviceCount( void )
// Return a fixed DeviceInfo for the single "PulseAudio" virtual device:
// 2-in/2-out duplex, default for both directions, the zero-terminated
// SUPPORTED_SAMPLERATES list, preferred rate 48 kHz, and the three natively
// supported formats. The device index is ignored.
8163 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8165 RtAudio::DeviceInfo info;
8166 info.name = "PulseAudio";
8167 info.outputChannels = 2;
8168 info.inputChannels = 2;
8169 info.duplexChannels = 2;
8170 info.isDefaultOutput = true;
8171 info.isDefaultInput = true;
8173 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8174 info.sampleRates.push_back( *sr );
8176 info.preferredSampleRate = 48000;
8177 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: pumps
// callbackEvent() until CallbackInfo::isRunning is cleared, with a
// cancellation point each pass. Logs whether SCHED_RR scheduling took effect.
8183 static void *pulseaudio_callback( void * user )
8184 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8185 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
// volatile: the flag is cleared from another thread (closeStream).
8187 volatile bool *isRunning = &cbi->isRunning;
8189 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8190 if (cbi->doRealtime) {
8191 std::cerr << "RtAudio pulse: " <<
8192 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8193 "running realtime scheduling" << std::endl;
8195 while ( *isRunning ) {
8196 pthread_testcancel();
8197 context->callbackEvent();
8200 pthread_exit( NULL );
// Tear down the PulseAudio stream: signal/unblock the callback thread, join
// it, flush and free the pa_simple connections, destroy the handle, free the
// user buffers, and mark the stream closed/uninitialized.
// NOTE(review): some interior lines are missing from this excerpt.
8203 void RtApiPulse::closeStream( void )
8205 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8207 stream_.callbackInfo.isRunning = false;
// If the callback thread is parked on runnable_cv, wake it so it can
// observe isRunning == false and exit.
8209 MUTEX_LOCK( &stream_.mutex );
8210 if ( stream_.state == STREAM_STOPPED ) {
8211 pah->runnable = true;
8212 pthread_cond_signal( &pah->runnable_cv );
8214 MUTEX_UNLOCK( &stream_.mutex );
8216 pthread_join( pah->thread, 0 );
// Flush queued playback audio before freeing the connection.
8217 if ( pah->s_play ) {
8218 pa_simple_flush( pah->s_play, NULL );
8219 pa_simple_free( pah->s_play );
8222 pa_simple_free( pah->s_rec );
8224 pthread_cond_destroy( &pah->runnable_cv );
8226 stream_.apiHandle = 0;
// Release user-side buffers for both directions.
8229 if ( stream_.userBuffer[0] ) {
8230 free( stream_.userBuffer[0] );
8231 stream_.userBuffer[0] = 0;
8233 if ( stream_.userBuffer[1] ) {
8234 free( stream_.userBuffer[1] );
8235 stream_.userBuffer[1] = 0;
8238 stream_.state = STREAM_CLOSED;
8239 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped, invoke
// the user callback, then blocking-write the rendered buffer with
// pa_simple_write and/or blocking-read the capture buffer with
// pa_simple_read, converting to/from the device format when needed.
// NOTE(review): interior lines (braces, declarations of `bytes`/`pa_error`,
// returns) are missing from this excerpt; code text is left byte-identical.
8242 void RtApiPulse::callbackEvent( void )
8244 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park on runnable_cv while stopped; re-check state after waking.
8246 if ( stream_.state == STREAM_STOPPED ) {
8247 MUTEX_LOCK( &stream_.mutex );
8248 while ( !pah->runnable )
8249 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8251 if ( stream_.state != STREAM_RUNNING ) {
8252 MUTEX_UNLOCK( &stream_.mutex );
8255 MUTEX_UNLOCK( &stream_.mutex );
8258 if ( stream_.state == STREAM_CLOSED ) {
8259 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8260 "this shouldn't happen!";
8261 error( RtAudioError::WARNING );
// User callback: return value 1 requests stop, 2 requests abort.
8265 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8266 double streamTime = getStreamTime();
8267 RtAudioStreamStatus status = 0;
8268 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8269 stream_.bufferSize, streamTime, status,
8270 stream_.callbackInfo.userData );
8272 if ( doStopStream == 2 ) {
8277 MUTEX_LOCK( &stream_.mutex );
// Device-facing buffers: the shared deviceBuffer when conversion is needed,
// otherwise the user buffers directly.
8278 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8279 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8281 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed), size the write, and push to Pulse ----
8286 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8287 if ( stream_.doConvertBuffer[OUTPUT] ) {
8288 convertBuffer( stream_.deviceBuffer,
8289 stream_.userBuffer[OUTPUT],
8290 stream_.convertInfo[OUTPUT] );
8291 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8292 formatBytes( stream_.deviceFormat[OUTPUT] );
8294 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8295 formatBytes( stream_.userFormat );
8297 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8298 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8299 pa_strerror( pa_error ) << ".";
8300 errorText_ = errorStream_.str();
8301 error( RtAudioError::WARNING );
// ---- Capture: size the read, pull from Pulse, then convert (if needed) ----
8305 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8306 if ( stream_.doConvertBuffer[INPUT] )
8307 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8308 formatBytes( stream_.deviceFormat[INPUT] );
8310 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8311 formatBytes( stream_.userFormat );
8313 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8314 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8315 pa_strerror( pa_error ) << ".";
8316 errorText_ = errorStream_.str();
8317 error( RtAudioError::WARNING );
8319 if ( stream_.doConvertBuffer[INPUT] ) {
8320 convertBuffer( stream_.userBuffer[INPUT],
8321 stream_.deviceBuffer,
8322 stream_.convertInfo[INPUT] );
8327 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock and honor a deferred stop request.
8328 RtApi::tickStreamTime();
8330 if ( doStopStream == 1 )
// Start the PulseAudio stream: validate state, then set STREAM_RUNNING and
// wake the callback thread via runnable/runnable_cv under the stream mutex.
8334 void RtApiPulse::startStream( void )
8336 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// INVALID_USE if never opened; WARNING if already running.
8338 if ( stream_.state == STREAM_CLOSED ) {
8339 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8340 error( RtAudioError::INVALID_USE );
8343 if ( stream_.state == STREAM_RUNNING ) {
8344 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8345 error( RtAudioError::WARNING );
8349 MUTEX_LOCK( &stream_.mutex );
8351 stream_.state = STREAM_RUNNING;
8353 pah->runnable = true;
8354 pthread_cond_signal( &pah->runnable_cv );
8355 MUTEX_UNLOCK( &stream_.mutex );
// Stop the PulseAudio stream gracefully: mark it stopped, then
// pa_simple_drain() the playback connection so queued audio finishes.
// On drain failure the mutex is released before raising SYSTEM_ERROR.
8358 void RtApiPulse::stopStream( void )
8360 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8362 if ( stream_.state == STREAM_CLOSED ) {
8363 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8364 error( RtAudioError::INVALID_USE );
8367 if ( stream_.state == STREAM_STOPPED ) {
8368 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8369 error( RtAudioError::WARNING );
8373 stream_.state = STREAM_STOPPED;
8374 MUTEX_LOCK( &stream_.mutex );
8376 if ( pah && pah->s_play ) {
8378 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8379 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8380 pa_strerror( pa_error ) << ".";
8381 errorText_ = errorStream_.str();
8382 MUTEX_UNLOCK( &stream_.mutex );
8383 error( RtAudioError::SYSTEM_ERROR );
8388 stream_.state = STREAM_STOPPED;
8389 MUTEX_UNLOCK( &stream_.mutex );
// Stop the PulseAudio stream immediately: identical to stopStream except it
// uses pa_simple_flush() (discarding queued playback audio) instead of drain.
8392 void RtApiPulse::abortStream( void )
8394 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8396 if ( stream_.state == STREAM_CLOSED ) {
8397 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8398 error( RtAudioError::INVALID_USE );
8401 if ( stream_.state == STREAM_STOPPED ) {
8402 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8403 error( RtAudioError::WARNING );
8407 stream_.state = STREAM_STOPPED;
8408 MUTEX_LOCK( &stream_.mutex );
8410 if ( pah && pah->s_play ) {
8412 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8413 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8414 pa_strerror( pa_error ) << ".";
8415 errorText_ = errorStream_.str();
8416 MUTEX_UNLOCK( &stream_.mutex );
8417 error( RtAudioError::SYSTEM_ERROR );
8422 stream_.state = STREAM_STOPPED;
8423 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the single PulseAudio device:
// validates device/channels/rate/format, sets up stream_ bookkeeping and
// conversion buffers, allocates the PulseAudioHandle, connects pa_simple
// record/playback streams, and (first open only) spawns the callback thread
// with optional SCHED_RR realtime scheduling. Returns false on any failure.
// NOTE(review): interior lines (error-cleanup gotos, some declarations such
// as `ss`, `error`, `sf_found`) are missing from this excerpt; code text is
// left byte-identical.
8426 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8427 unsigned int channels, unsigned int firstChannel,
8428 unsigned int sampleRate, RtAudioFormat format,
8429 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8431 PulseAudioHandle *pah = 0;
8432 unsigned long bufferBytes = 0;
// Only device 0 exists; only mono/stereo and firstChannel 0 are accepted.
8435 if ( device != 0 ) return false;
8436 if ( mode != INPUT && mode != OUTPUT ) return false;
8437 if ( channels != 1 && channels != 2 ) {
8438 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8441 ss.channels = channels;
8443 if ( firstChannel != 0 ) return false;
// Sample rate must be one of the entries in SUPPORTED_SAMPLERATES.
8445 bool sr_found = false;
8446 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8447 if ( sampleRate == *sr ) {
8449 stream_.sampleRate = sampleRate;
8450 ss.rate = sampleRate;
8455 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Pick a native pa_sample_format_t for the requested RtAudio format ...
8460 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8461 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8462 if ( format == sf->rtaudio_format ) {
8464 stream_.userFormat = sf->rtaudio_format;
8465 stream_.deviceFormat[mode] = stream_.userFormat;
8466 ss.format = sf->pa_format;
// ... or fall back to FLOAT32LE on the device side and convert internally.
8470 if ( !sf_found ) { // Use internal data format conversion.
8471 stream_.userFormat = format;
8472 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8473 ss.format = PA_SAMPLE_FLOAT32LE;
8476 // Set other stream parameters.
8477 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8478 else stream_.userInterleaved = true;
8479 stream_.deviceInterleaved[mode] = true;
8480 stream_.nBuffers = 1;
8481 stream_.doByteSwap[mode] = false;
8482 stream_.nUserChannels[mode] = channels;
8483 stream_.nDeviceChannels[mode] = channels + firstChannel;
8484 stream_.channelOffset[mode] = 0;
8485 std::string streamName = "RtAudio";
8487 // Set flags for buffer conversion.
8488 stream_.doConvertBuffer[mode] = false;
8489 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8490 stream_.doConvertBuffer[mode] = true;
8491 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8492 stream_.doConvertBuffer[mode] = true;
8494 // Allocate necessary internal buffers.
8495 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8496 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8497 if ( stream_.userBuffer[mode] == NULL ) {
8498 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8501 stream_.bufferSize = *bufferSize;
// Device buffer is only (re)allocated when conversion is needed, and an
// existing output-side buffer is reused for input if it is large enough.
8503 if ( stream_.doConvertBuffer[mode] ) {
8505 bool makeBuffer = true;
8506 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8507 if ( mode == INPUT ) {
8508 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8509 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8510 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8515 bufferBytes *= *bufferSize;
8516 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8517 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8518 if ( stream_.deviceBuffer == NULL ) {
8519 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8525 stream_.device[mode] = device;
8527 // Setup the buffer conversion information structure.
8528 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the API handle on first open (second open of a duplex pair reuses
// the existing one).
8530 if ( !stream_.apiHandle ) {
8531 PulseAudioHandle *pah = new PulseAudioHandle;
8533 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8537 stream_.apiHandle = pah;
8538 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8539 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8543 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8546 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Connect to the PulseAudio server: record stream gets explicit buffer
// attributes (fragsize), playback uses server defaults.
8549 pa_buffer_attr buffer_attr;
8550 buffer_attr.fragsize = bufferBytes;
8551 buffer_attr.maxlength = -1;
8553 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8554 if ( !pah->s_rec ) {
8555 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8560 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8561 if ( !pah->s_play ) {
8562 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track combined mode: second direction of a pair upgrades to DUPLEX.
8570 if ( stream_.mode == UNINITIALIZED )
8571 stream_.mode = mode;
8572 else if ( stream_.mode == mode )
8575 stream_.mode = DUPLEX;
8577 if ( !stream_.callbackInfo.isRunning ) {
8578 stream_.callbackInfo.object = this;
8580 stream_.state = STREAM_STOPPED;
8581 // Set the thread attributes for joinable and realtime scheduling
8582 // priority (optional). The higher priority will only take affect
8583 // if the program is run as root or suid. Note, under Linux
8584 // processes with CAP_SYS_NICE privilege, a user can change
8585 // scheduling policy and priority (thus need not be root). See
8586 // POSIX "capabilities".
8587 pthread_attr_t attr;
8588 pthread_attr_init( &attr );
8589 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8590 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8591 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8592 stream_.callbackInfo.doRealtime = true;
8593 struct sched_param param;
8594 int priority = options->priority;
8595 int min = sched_get_priority_min( SCHED_RR );
8596 int max = sched_get_priority_max( SCHED_RR );
8597 if ( priority < min ) priority = min;
8598 else if ( priority > max ) priority = max;
8599 param.sched_priority = priority;
8601 // Set the policy BEFORE the priority. Otherwise it fails.
8602 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8603 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8604 // This is definitely required. Otherwise it fails.
8605 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8606 pthread_attr_setschedparam(&attr, &param);
8609 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8611 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8614 stream_.callbackInfo.isRunning = true;
8615 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8616 pthread_attr_destroy(&attr);
// If realtime attributes were rejected, retry with default attributes.
8618 // Failed. Try instead with default attributes.
8619 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8621 stream_.callbackInfo.isRunning = false;
8622 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// ---- Error cleanup path: free handle, buffers, and reset state ----
8631 if ( pah && stream_.callbackInfo.isRunning ) {
8632 pthread_cond_destroy( &pah->runnable_cv );
8634 stream_.apiHandle = 0;
8637 for ( int i=0; i<2; i++ ) {
8638 if ( stream_.userBuffer[i] ) {
8639 free( stream_.userBuffer[i] );
8640 stream_.userBuffer[i] = 0;
8644 if ( stream_.deviceBuffer ) {
8645 free( stream_.deviceBuffer );
8646 stream_.deviceBuffer = 0;
8649 stream_.state = STREAM_CLOSED;
8653 //******************** End of __LINUX_PULSE__ *********************//
8656 #if defined(__LINUX_OSS__)
8659 #include <sys/ioctl.h>
8662 #include <sys/soundcard.h>
8666 static void *ossCallbackHandler(void * ptr);
8668 // A structure to hold various information related to the OSS API
// Per-stream OSS state: device file descriptors, the condition variable the
// callback thread waits on, and (per the constructor) triggered/xrun flags.
// NOTE(review): the struct declaration line and several member declarations
// are missing from this excerpt.
8671 int id[2]; // device ids
8674 pthread_cond_t runnable
8677 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — no OSS-specific initialization is required.
8680 RtApiOss :: RtApiOss()
8682 // Nothing to do here.
// Destructor: close the stream if it was left open.
8685 RtApiOss :: ~RtApiOss()
8687 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query the number of OSS audio devices via SNDCTL_SYSINFO on /dev/mixer
// (requires OSS >= 4.0). Emits a WARNING and falls through on failure.
// NOTE(review): close()/return lines are missing from this excerpt.
8690 unsigned int RtApiOss :: getDeviceCount( void )
8692 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8693 if ( mixerfd == -1 ) {
8694 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8695 error( RtAudioError::WARNING );
8699 oss_sysinfo sysinfo;
8700 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8702 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8703 error( RtAudioError::WARNING );
8708 return sysinfo.numaudios;
// Probe one OSS device via /dev/mixer ioctls (SNDCTL_SYSINFO +
// SNDCTL_AUDIOINFO): channel capabilities, native data formats (from the
// input format mask), and supported sample rates — either the device's
// explicit rate list or the SAMPLE_RATES entries within its min/max range.
// info.probed starts false; failures emit a WARNING and fall through.
// NOTE(review): interior lines (close() calls, returns, braces) are missing
// from this excerpt; code text is left byte-identical.
8711 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8713 RtAudio::DeviceInfo info;
8714 info.probed = false;
8716 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8717 if ( mixerfd == -1 ) {
8718 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8719 error( RtAudioError::WARNING );
8723 oss_sysinfo sysinfo;
8724 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8725 if ( result == -1 ) {
8727 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8728 error( RtAudioError::WARNING );
// Validate the requested device index against the system device count.
8732 unsigned nDevices = sysinfo.numaudios;
8733 if ( nDevices == 0 ) {
8735 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8736 error( RtAudioError::INVALID_USE );
8740 if ( device >= nDevices ) {
8742 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8743 error( RtAudioError::INVALID_USE );
8747 oss_audioinfo ainfo;
8749 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8751 if ( result == -1 ) {
8752 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8753 errorText_ = errorStream_.str();
8754 error( RtAudioError::WARNING );
// Channel capabilities from the PCM capability bits; duplex channel count is
// the lesser of input and output.
8759 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8760 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8761 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8762 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8763 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8766 // Probe data formats ... do for input
8767 unsigned long mask = ainfo.iformats;
8768 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8769 info.nativeFormats |= RTAUDIO_SINT16;
8770 if ( mask & AFMT_S8 )
8771 info.nativeFormats |= RTAUDIO_SINT8;
8772 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8773 info.nativeFormats |= RTAUDIO_SINT32;
8775 if ( mask & AFMT_FLOAT )
8776 info.nativeFormats |= RTAUDIO_FLOAT32;
8778 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8779 info.nativeFormats |= RTAUDIO_SINT24;
8781 // Check that we have at least one supported format
8782 if ( info.nativeFormats == 0 ) {
8783 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8784 errorText_ = errorStream_.str();
8785 error( RtAudioError::WARNING );
8789 // Probe the supported sample rates.
8790 info.sampleRates.clear();
// Preferred rate: the highest supported rate not exceeding 48 kHz.
8791 if ( ainfo.nrates ) {
8792 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8793 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8794 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8795 info.sampleRates.push_back( SAMPLE_RATES[k] );
8797 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8798 info.preferredSampleRate = SAMPLE_RATES[k];
8806 // Check min and max rate values;
8807 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8808 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8809 info.sampleRates.push_back( SAMPLE_RATES[k] );
8811 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8812 info.preferredSampleRate = SAMPLE_RATES[k];
8817 if ( info.sampleRates.size() == 0 ) {
8818 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8819 errorText_ = errorStream_.str();
8820 error( RtAudioError::WARNING );
8824 info.name = ainfo.name;
8831 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8832 unsigned int firstChannel, unsigned int sampleRate,
8833 RtAudioFormat format, unsigned int *bufferSize,
8834 RtAudio::StreamOptions *options )
8836 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8837 if ( mixerfd == -1 ) {
8838 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8842 oss_sysinfo sysinfo;
8843 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8844 if ( result == -1 ) {
8846 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8850 unsigned nDevices = sysinfo.numaudios;
8851 if ( nDevices == 0 ) {
8852 // This should not happen because a check is made before this function is called.
8854 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8858 if ( device >= nDevices ) {
8859 // This should not happen because a check is made before this function is called.
8861 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8865 oss_audioinfo ainfo;
8867 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8869 if ( result == -1 ) {
8870 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8871 errorText_ = errorStream_.str();
8875 // Check if device supports input or output
8876 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8877 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8878 if ( mode == OUTPUT )
8879 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8881 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8882 errorText_ = errorStream_.str();
8887 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8888 if ( mode == OUTPUT )
8890 else { // mode == INPUT
8891 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8892 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8893 close( handle->id[0] );
8895 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8896 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8897 errorText_ = errorStream_.str();
8900 // Check that the number previously set channels is the same.
8901 if ( stream_.nUserChannels[0] != channels ) {
8902 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8903 errorText_ = errorStream_.str();
8912 // Set exclusive access if specified.
8913 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8915 // Try to open the device.
8917 fd = open( ainfo.devnode, flags, 0 );
8919 if ( errno == EBUSY )
8920 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8922 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8923 errorText_ = errorStream_.str();
8927 // For duplex operation, specifically set this mode (this doesn't seem to work).
8929 if ( flags | O_RDWR ) {
8930 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8931 if ( result == -1) {
8932 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8933 errorText_ = errorStream_.str();
8939 // Check the device channel support.
8940 stream_.nUserChannels[mode] = channels;
8941 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8943 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8944 errorText_ = errorStream_.str();
8948 // Set the number of channels.
8949 int deviceChannels = channels + firstChannel;
8950 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8951 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8953 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8954 errorText_ = errorStream_.str();
8957 stream_.nDeviceChannels[mode] = deviceChannels;
8959 // Get the data format mask
8961 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8962 if ( result == -1 ) {
8964 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8965 errorText_ = errorStream_.str();
8969 // Determine how to set the device format.
8970 stream_.userFormat = format;
8971 int deviceFormat = -1;
8972 stream_.doByteSwap[mode] = false;
8973 if ( format == RTAUDIO_SINT8 ) {
8974 if ( mask & AFMT_S8 ) {
8975 deviceFormat = AFMT_S8;
8976 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8979 else if ( format == RTAUDIO_SINT16 ) {
8980 if ( mask & AFMT_S16_NE ) {
8981 deviceFormat = AFMT_S16_NE;
8982 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8984 else if ( mask & AFMT_S16_OE ) {
8985 deviceFormat = AFMT_S16_OE;
8986 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8987 stream_.doByteSwap[mode] = true;
8990 else if ( format == RTAUDIO_SINT24 ) {
8991 if ( mask & AFMT_S24_NE ) {
8992 deviceFormat = AFMT_S24_NE;
8993 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8995 else if ( mask & AFMT_S24_OE ) {
8996 deviceFormat = AFMT_S24_OE;
8997 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8998 stream_.doByteSwap[mode] = true;
9001 else if ( format == RTAUDIO_SINT32 ) {
9002 if ( mask & AFMT_S32_NE ) {
9003 deviceFormat = AFMT_S32_NE;
9004 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9006 else if ( mask & AFMT_S32_OE ) {
9007 deviceFormat = AFMT_S32_OE;
9008 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9009 stream_.doByteSwap[mode] = true;
9013 if ( deviceFormat == -1 ) {
9014 // The user requested format is not natively supported by the device.
9015 if ( mask & AFMT_S16_NE ) {
9016 deviceFormat = AFMT_S16_NE;
9017 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9019 else if ( mask & AFMT_S32_NE ) {
9020 deviceFormat = AFMT_S32_NE;
9021 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9023 else if ( mask & AFMT_S24_NE ) {
9024 deviceFormat = AFMT_S24_NE;
9025 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9027 else if ( mask & AFMT_S16_OE ) {
9028 deviceFormat = AFMT_S16_OE;
9029 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9030 stream_.doByteSwap[mode] = true;
9032 else if ( mask & AFMT_S32_OE ) {
9033 deviceFormat = AFMT_S32_OE;
9034 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9035 stream_.doByteSwap[mode] = true;
9037 else if ( mask & AFMT_S24_OE ) {
9038 deviceFormat = AFMT_S24_OE;
9039 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9040 stream_.doByteSwap[mode] = true;
9042 else if ( mask & AFMT_S8) {
9043 deviceFormat = AFMT_S8;
9044 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9048 if ( stream_.deviceFormat[mode] == 0 ) {
9049 // This really shouldn't happen ...
9051 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9052 errorText_ = errorStream_.str();
9056 // Set the data format.
9057 int temp = deviceFormat;
9058 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9059 if ( result == -1 || deviceFormat != temp ) {
9061 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9062 errorText_ = errorStream_.str();
9066 // Attempt to set the buffer size. According to OSS, the minimum
9067 // number of buffers is two. The supposed minimum buffer size is 16
9068 // bytes, so that will be our lower bound. The argument to this
9069 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9070 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9071 // We'll check the actual value used near the end of the setup
9073 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9074 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9076 if ( options ) buffers = options->numberOfBuffers;
9077 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9078 if ( buffers < 2 ) buffers = 3;
9079 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9080 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9081 if ( result == -1 ) {
9083 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9084 errorText_ = errorStream_.str();
9087 stream_.nBuffers = buffers;
9089 // Save buffer size (in sample frames).
9090 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9091 stream_.bufferSize = *bufferSize;
9093 // Set the sample rate.
9094 int srate = sampleRate;
9095 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9096 if ( result == -1 ) {
9098 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9099 errorText_ = errorStream_.str();
9103 // Verify the sample rate setup worked.
9104 if ( abs( srate - (int)sampleRate ) > 100 ) {
9106 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9107 errorText_ = errorStream_.str();
9110 stream_.sampleRate = sampleRate;
9112 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9113 // We're doing duplex setup here.
9114 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9115 stream_.nDeviceChannels[0] = deviceChannels;
9118 // Set interleaving parameters.
9119 stream_.userInterleaved = true;
9120 stream_.deviceInterleaved[mode] = true;
9121 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9122 stream_.userInterleaved = false;
9124 // Set flags for buffer conversion
9125 stream_.doConvertBuffer[mode] = false;
9126 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9127 stream_.doConvertBuffer[mode] = true;
9128 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9129 stream_.doConvertBuffer[mode] = true;
9130 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9131 stream_.nUserChannels[mode] > 1 )
9132 stream_.doConvertBuffer[mode] = true;
9134 // Allocate the stream handles if necessary and then save.
9135 if ( stream_.apiHandle == 0 ) {
9137 handle = new OssHandle;
9139 catch ( std::bad_alloc& ) {
9140 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9144 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9145 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9149 stream_.apiHandle = (void *) handle;
9152 handle = (OssHandle *) stream_.apiHandle;
9154 handle->id[mode] = fd;
9156 // Allocate necessary internal buffers.
9157 unsigned long bufferBytes;
9158 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9159 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9160 if ( stream_.userBuffer[mode] == NULL ) {
9161 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9165 if ( stream_.doConvertBuffer[mode] ) {
9167 bool makeBuffer = true;
9168 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9169 if ( mode == INPUT ) {
9170 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9171 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9172 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9177 bufferBytes *= *bufferSize;
9178 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9179 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9180 if ( stream_.deviceBuffer == NULL ) {
9181 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9187 stream_.device[mode] = device;
9188 stream_.state = STREAM_STOPPED;
9190 // Setup the buffer conversion information structure.
9191 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9193 // Setup thread if necessary.
9194 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9195 // We had already set up an output stream.
9196 stream_.mode = DUPLEX;
9197 if ( stream_.device[0] == device ) handle->id[0] = fd;
9200 stream_.mode = mode;
9202 // Setup callback thread.
9203 stream_.callbackInfo.object = (void *) this;
9205 // Set the thread attributes for joinable and realtime scheduling
9206 // priority. The higher priority will only take affect if the
9207 // program is run as root or suid.
9208 pthread_attr_t attr;
9209 pthread_attr_init( &attr );
9210 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9211 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9212 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9213 stream_.callbackInfo.doRealtime = true;
9214 struct sched_param param;
9215 int priority = options->priority;
9216 int min = sched_get_priority_min( SCHED_RR );
9217 int max = sched_get_priority_max( SCHED_RR );
9218 if ( priority < min ) priority = min;
9219 else if ( priority > max ) priority = max;
9220 param.sched_priority = priority;
9222 // Set the policy BEFORE the priority. Otherwise it fails.
9223 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9224 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9225 // This is definitely required. Otherwise it fails.
9226 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9227 pthread_attr_setschedparam(&attr, ¶m);
9230 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9232 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9235 stream_.callbackInfo.isRunning = true;
9236 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9237 pthread_attr_destroy( &attr );
9239 // Failed. Try instead with default attributes.
9240 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9242 stream_.callbackInfo.isRunning = false;
9243 errorText_ = "RtApiOss::error creating callback thread!";
9253 pthread_cond_destroy( &handle->runnable );
9254 if ( handle->id[0] ) close( handle->id[0] );
9255 if ( handle->id[1] ) close( handle->id[1] );
9257 stream_.apiHandle = 0;
9260 for ( int i=0; i<2; i++ ) {
9261 if ( stream_.userBuffer[i] ) {
9262 free( stream_.userBuffer[i] );
9263 stream_.userBuffer[i] = 0;
9267 if ( stream_.deviceBuffer ) {
9268 free( stream_.deviceBuffer );
9269 stream_.deviceBuffer = 0;
9272 stream_.state = STREAM_CLOSED;
9276 void RtApiOss :: closeStream()
9278 if ( stream_.state == STREAM_CLOSED ) {
9279 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9280 error( RtAudioError::WARNING );
9284 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9285 stream_.callbackInfo.isRunning = false;
9286 MUTEX_LOCK( &stream_.mutex );
9287 if ( stream_.state == STREAM_STOPPED )
9288 pthread_cond_signal( &handle->runnable );
9289 MUTEX_UNLOCK( &stream_.mutex );
9290 pthread_join( stream_.callbackInfo.thread, NULL );
9292 if ( stream_.state == STREAM_RUNNING ) {
9293 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9294 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9296 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9297 stream_.state = STREAM_STOPPED;
9301 pthread_cond_destroy( &handle->runnable );
9302 if ( handle->id[0] ) close( handle->id[0] );
9303 if ( handle->id[1] ) close( handle->id[1] );
9305 stream_.apiHandle = 0;
9308 for ( int i=0; i<2; i++ ) {
9309 if ( stream_.userBuffer[i] ) {
9310 free( stream_.userBuffer[i] );
9311 stream_.userBuffer[i] = 0;
9315 if ( stream_.deviceBuffer ) {
9316 free( stream_.deviceBuffer );
9317 stream_.deviceBuffer = 0;
9320 stream_.mode = UNINITIALIZED;
9321 stream_.state = STREAM_CLOSED;
9324 void RtApiOss :: startStream()
9327 if ( stream_.state == STREAM_RUNNING ) {
9328 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9329 error( RtAudioError::WARNING );
9333 MUTEX_LOCK( &stream_.mutex );
9335 stream_.state = STREAM_RUNNING;
9337 // No need to do anything else here ... OSS automatically starts
9338 // when fed samples.
9340 MUTEX_UNLOCK( &stream_.mutex );
9342 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9343 pthread_cond_signal( &handle->runnable );
9346 void RtApiOss :: stopStream()
9349 if ( stream_.state == STREAM_STOPPED ) {
9350 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9351 error( RtAudioError::WARNING );
9355 MUTEX_LOCK( &stream_.mutex );
9357 // The state might change while waiting on a mutex.
9358 if ( stream_.state == STREAM_STOPPED ) {
9359 MUTEX_UNLOCK( &stream_.mutex );
9364 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9365 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9367 // Flush the output with zeros a few times.
9370 RtAudioFormat format;
9372 if ( stream_.doConvertBuffer[0] ) {
9373 buffer = stream_.deviceBuffer;
9374 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9375 format = stream_.deviceFormat[0];
9378 buffer = stream_.userBuffer[0];
9379 samples = stream_.bufferSize * stream_.nUserChannels[0];
9380 format = stream_.userFormat;
9383 memset( buffer, 0, samples * formatBytes(format) );
9384 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9385 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9386 if ( result == -1 ) {
9387 errorText_ = "RtApiOss::stopStream: audio write error.";
9388 error( RtAudioError::WARNING );
9392 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9393 if ( result == -1 ) {
9394 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9395 errorText_ = errorStream_.str();
9398 handle->triggered = false;
9401 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9402 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9403 if ( result == -1 ) {
9404 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9405 errorText_ = errorStream_.str();
9411 stream_.state = STREAM_STOPPED;
9412 MUTEX_UNLOCK( &stream_.mutex );
9414 if ( result != -1 ) return;
9415 error( RtAudioError::SYSTEM_ERROR );
9418 void RtApiOss :: abortStream()
9421 if ( stream_.state == STREAM_STOPPED ) {
9422 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9423 error( RtAudioError::WARNING );
9427 MUTEX_LOCK( &stream_.mutex );
9429 // The state might change while waiting on a mutex.
9430 if ( stream_.state == STREAM_STOPPED ) {
9431 MUTEX_UNLOCK( &stream_.mutex );
9436 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9437 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9438 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9439 if ( result == -1 ) {
9440 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9441 errorText_ = errorStream_.str();
9444 handle->triggered = false;
9447 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9448 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9449 if ( result == -1 ) {
9450 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9451 errorText_ = errorStream_.str();
9457 stream_.state = STREAM_STOPPED;
9458 MUTEX_UNLOCK( &stream_.mutex );
9460 if ( result != -1 ) return;
9461 error( RtAudioError::SYSTEM_ERROR );
9464 void RtApiOss :: callbackEvent()
9466 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9467 if ( stream_.state == STREAM_STOPPED ) {
9468 MUTEX_LOCK( &stream_.mutex );
9469 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9470 if ( stream_.state != STREAM_RUNNING ) {
9471 MUTEX_UNLOCK( &stream_.mutex );
9474 MUTEX_UNLOCK( &stream_.mutex );
9477 if ( stream_.state == STREAM_CLOSED ) {
9478 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9479 error( RtAudioError::WARNING );
9483 // Invoke user callback to get fresh output data.
9484 int doStopStream = 0;
9485 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9486 double streamTime = getStreamTime();
9487 RtAudioStreamStatus status = 0;
9488 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9489 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9490 handle->xrun[0] = false;
9492 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9493 status |= RTAUDIO_INPUT_OVERFLOW;
9494 handle->xrun[1] = false;
9496 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9497 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9498 if ( doStopStream == 2 ) {
9499 this->abortStream();
9503 MUTEX_LOCK( &stream_.mutex );
9505 // The state might change while waiting on a mutex.
9506 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9511 RtAudioFormat format;
9513 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9515 // Setup parameters and do buffer conversion if necessary.
9516 if ( stream_.doConvertBuffer[0] ) {
9517 buffer = stream_.deviceBuffer;
9518 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9519 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9520 format = stream_.deviceFormat[0];
9523 buffer = stream_.userBuffer[0];
9524 samples = stream_.bufferSize * stream_.nUserChannels[0];
9525 format = stream_.userFormat;
9528 // Do byte swapping if necessary.
9529 if ( stream_.doByteSwap[0] )
9530 byteSwapBuffer( buffer, samples, format );
9532 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9534 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9535 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9536 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9537 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9538 handle->triggered = true;
9541 // Write samples to device.
9542 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9544 if ( result == -1 ) {
9545 // We'll assume this is an underrun, though there isn't a
9546 // specific means for determining that.
9547 handle->xrun[0] = true;
9548 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9549 error( RtAudioError::WARNING );
9550 // Continue on to input section.
9554 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9556 // Setup parameters.
9557 if ( stream_.doConvertBuffer[1] ) {
9558 buffer = stream_.deviceBuffer;
9559 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9560 format = stream_.deviceFormat[1];
9563 buffer = stream_.userBuffer[1];
9564 samples = stream_.bufferSize * stream_.nUserChannels[1];
9565 format = stream_.userFormat;
9568 // Read samples from device.
9569 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9571 if ( result == -1 ) {
9572 // We'll assume this is an overrun, though there isn't a
9573 // specific means for determining that.
9574 handle->xrun[1] = true;
9575 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9576 error( RtAudioError::WARNING );
9580 // Do byte swapping if necessary.
9581 if ( stream_.doByteSwap[1] )
9582 byteSwapBuffer( buffer, samples, format );
9584 // Do buffer conversion if necessary.
9585 if ( stream_.doConvertBuffer[1] )
9586 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9590 MUTEX_UNLOCK( &stream_.mutex );
9592 RtApi::tickStreamTime();
9593 if ( doStopStream == 1 ) this->stopStream();
9596 static void *ossCallbackHandler( void *ptr )
9598 CallbackInfo *info = (CallbackInfo *) ptr;
9599 RtApiOss *object = (RtApiOss *) info->object;
9600 bool *isRunning = &info->isRunning;
9602 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9603 if (info->doRealtime) {
9604 std::cerr << "RtAudio oss: " <<
9605 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9606 "running realtime scheduling" << std::endl;
9610 while ( *isRunning == true ) {
9611 pthread_testcancel();
9612 object->callbackEvent();
9615 pthread_exit( NULL );
9618 //******************** End of __LINUX_OSS__ *********************//
9622 // *************************************************** //
9624 // Protected common (OS-independent) RtAudio methods.
9626 // *************************************************** //
9628 // This method can be modified to control the behavior of error
9629 // message printing.
9630 void RtApi :: error( RtAudioError::Type type )
9632 errorStream_.str(""); // clear the ostringstream
9634 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9635 if ( errorCallback ) {
9636 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9638 if ( firstErrorOccurred_ )
9641 firstErrorOccurred_ = true;
9642 const std::string errorMessage = errorText_;
9644 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9645 stream_.callbackInfo.isRunning = false; // exit from the thread
9649 errorCallback( type, errorMessage );
9650 firstErrorOccurred_ = false;
9654 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9655 std::cerr << '\n' << errorText_ << "\n\n";
9656 else if ( type != RtAudioError::WARNING )
9657 throw( RtAudioError( errorText_, type ) );
9660 void RtApi :: verifyStream()
9662 if ( stream_.state == STREAM_CLOSED ) {
9663 errorText_ = "RtApi:: a stream is not open!";
9664 error( RtAudioError::INVALID_USE );
9668 void RtApi :: clearStreamInfo()
9670 stream_.mode = UNINITIALIZED;
9671 stream_.state = STREAM_CLOSED;
9672 stream_.sampleRate = 0;
9673 stream_.bufferSize = 0;
9674 stream_.nBuffers = 0;
9675 stream_.userFormat = 0;
9676 stream_.userInterleaved = true;
9677 stream_.streamTime = 0.0;
9678 stream_.apiHandle = 0;
9679 stream_.deviceBuffer = 0;
9680 stream_.callbackInfo.callback = 0;
9681 stream_.callbackInfo.userData = 0;
9682 stream_.callbackInfo.isRunning = false;
9683 stream_.callbackInfo.errorCallback = 0;
9684 for ( int i=0; i<2; i++ ) {
9685 stream_.device[i] = 11111;
9686 stream_.doConvertBuffer[i] = false;
9687 stream_.deviceInterleaved[i] = true;
9688 stream_.doByteSwap[i] = false;
9689 stream_.nUserChannels[i] = 0;
9690 stream_.nDeviceChannels[i] = 0;
9691 stream_.channelOffset[i] = 0;
9692 stream_.deviceFormat[i] = 0;
9693 stream_.latency[i] = 0;
9694 stream_.userBuffer[i] = 0;
9695 stream_.convertInfo[i].channels = 0;
9696 stream_.convertInfo[i].inJump = 0;
9697 stream_.convertInfo[i].outJump = 0;
9698 stream_.convertInfo[i].inFormat = 0;
9699 stream_.convertInfo[i].outFormat = 0;
9700 stream_.convertInfo[i].inOffset.clear();
9701 stream_.convertInfo[i].outOffset.clear();
9705 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9707 if ( format == RTAUDIO_SINT16 )
9709 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9711 else if ( format == RTAUDIO_FLOAT64 )
9713 else if ( format == RTAUDIO_SINT24 )
9715 else if ( format == RTAUDIO_SINT8 )
9718 errorText_ = "RtApi::formatBytes: undefined format.";
9719 error( RtAudioError::WARNING );
9724 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9726 if ( mode == INPUT ) { // convert device to user buffer
9727 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9728 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9729 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9730 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9732 else { // convert user to device buffer
9733 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9734 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9735 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9736 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9739 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9740 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9742 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9744 // Set up the interleave/deinterleave offsets.
9745 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9746 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9747 ( mode == INPUT && stream_.userInterleaved ) ) {
9748 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9749 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9750 stream_.convertInfo[mode].outOffset.push_back( k );
9751 stream_.convertInfo[mode].inJump = 1;
9755 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9756 stream_.convertInfo[mode].inOffset.push_back( k );
9757 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9758 stream_.convertInfo[mode].outJump = 1;
9762 else { // no (de)interleaving
9763 if ( stream_.userInterleaved ) {
9764 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9765 stream_.convertInfo[mode].inOffset.push_back( k );
9766 stream_.convertInfo[mode].outOffset.push_back( k );
9770 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9771 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9772 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9773 stream_.convertInfo[mode].inJump = 1;
9774 stream_.convertInfo[mode].outJump = 1;
9779 // Add channel offset.
9780 if ( firstChannel > 0 ) {
9781 if ( stream_.deviceInterleaved[mode] ) {
9782 if ( mode == OUTPUT ) {
9783 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9784 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9787 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9788 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9792 if ( mode == OUTPUT ) {
9793 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9794 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9797 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9798 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9804 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9806 // This function does format conversion, input/output channel compensation, and
9807 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9808 // the lower three bytes of a 32-bit integer.
9810 // Clear our device buffer when in/out duplex device channels are different
9811 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9812 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9813 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9816 if (info.outFormat == RTAUDIO_FLOAT64) {
9818 Float64 *out = (Float64 *)outBuffer;
9820 if (info.inFormat == RTAUDIO_SINT8) {
9821 signed char *in = (signed char *)inBuffer;
9822 scale = 1.0 / 127.5;
9823 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9824 for (j=0; j<info.channels; j++) {
9825 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9826 out[info.outOffset[j]] += 0.5;
9827 out[info.outOffset[j]] *= scale;
9830 out += info.outJump;
9833 else if (info.inFormat == RTAUDIO_SINT16) {
9834 Int16 *in = (Int16 *)inBuffer;
9835 scale = 1.0 / 32767.5;
9836 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9837 for (j=0; j<info.channels; j++) {
9838 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9839 out[info.outOffset[j]] += 0.5;
9840 out[info.outOffset[j]] *= scale;
9843 out += info.outJump;
9846 else if (info.inFormat == RTAUDIO_SINT24) {
9847 Int24 *in = (Int24 *)inBuffer;
9848 scale = 1.0 / 8388607.5;
9849 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9850 for (j=0; j<info.channels; j++) {
9851 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9852 out[info.outOffset[j]] += 0.5;
9853 out[info.outOffset[j]] *= scale;
9856 out += info.outJump;
9859 else if (info.inFormat == RTAUDIO_SINT32) {
9860 Int32 *in = (Int32 *)inBuffer;
9861 scale = 1.0 / 2147483647.5;
9862 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9863 for (j=0; j<info.channels; j++) {
9864 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9865 out[info.outOffset[j]] += 0.5;
9866 out[info.outOffset[j]] *= scale;
9869 out += info.outJump;
9872 else if (info.inFormat == RTAUDIO_FLOAT32) {
9873 Float32 *in = (Float32 *)inBuffer;
9874 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9875 for (j=0; j<info.channels; j++) {
9876 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9879 out += info.outJump;
9882 else if (info.inFormat == RTAUDIO_FLOAT64) {
9883 // Channel compensation and/or (de)interleaving only.
9884 Float64 *in = (Float64 *)inBuffer;
9885 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9886 for (j=0; j<info.channels; j++) {
9887 out[info.outOffset[j]] = in[info.inOffset[j]];
9890 out += info.outJump;
9894 else if (info.outFormat == RTAUDIO_FLOAT32) {
9896 Float32 *out = (Float32 *)outBuffer;
9898 if (info.inFormat == RTAUDIO_SINT8) {
9899 signed char *in = (signed char *)inBuffer;
9900 scale = (Float32) ( 1.0 / 127.5 );
9901 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9902 for (j=0; j<info.channels; j++) {
9903 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9904 out[info.outOffset[j]] += 0.5;
9905 out[info.outOffset[j]] *= scale;
9908 out += info.outJump;
9911 else if (info.inFormat == RTAUDIO_SINT16) {
9912 Int16 *in = (Int16 *)inBuffer;
9913 scale = (Float32) ( 1.0 / 32767.5 );
9914 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9915 for (j=0; j<info.channels; j++) {
9916 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9917 out[info.outOffset[j]] += 0.5;
9918 out[info.outOffset[j]] *= scale;
9921 out += info.outJump;
9924 else if (info.inFormat == RTAUDIO_SINT24) {
9925 Int24 *in = (Int24 *)inBuffer;
9926 scale = (Float32) ( 1.0 / 8388607.5 );
9927 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9928 for (j=0; j<info.channels; j++) {
9929 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9930 out[info.outOffset[j]] += 0.5;
9931 out[info.outOffset[j]] *= scale;
9934 out += info.outJump;
9937 else if (info.inFormat == RTAUDIO_SINT32) {
9938 Int32 *in = (Int32 *)inBuffer;
9939 scale = (Float32) ( 1.0 / 2147483647.5 );
9940 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9941 for (j=0; j<info.channels; j++) {
9942 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9943 out[info.outOffset[j]] += 0.5;
9944 out[info.outOffset[j]] *= scale;
9947 out += info.outJump;
9950 else if (info.inFormat == RTAUDIO_FLOAT32) {
9951 // Channel compensation and/or (de)interleaving only.
9952 Float32 *in = (Float32 *)inBuffer;
9953 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9954 for (j=0; j<info.channels; j++) {
9955 out[info.outOffset[j]] = in[info.inOffset[j]];
9958 out += info.outJump;
9961 else if (info.inFormat == RTAUDIO_FLOAT64) {
9962 Float64 *in = (Float64 *)inBuffer;
9963 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9964 for (j=0; j<info.channels; j++) {
9965 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9968 out += info.outJump;
9972 else if (info.outFormat == RTAUDIO_SINT32) {
9973 Int32 *out = (Int32 *)outBuffer;
9974 if (info.inFormat == RTAUDIO_SINT8) {
9975 signed char *in = (signed char *)inBuffer;
9976 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9977 for (j=0; j<info.channels; j++) {
9978 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9979 out[info.outOffset[j]] <<= 24;
9982 out += info.outJump;
9985 else if (info.inFormat == RTAUDIO_SINT16) {
9986 Int16 *in = (Int16 *)inBuffer;
9987 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9988 for (j=0; j<info.channels; j++) {
9989 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9990 out[info.outOffset[j]] <<= 16;
9993 out += info.outJump;
9996 else if (info.inFormat == RTAUDIO_SINT24) {
9997 Int24 *in = (Int24 *)inBuffer;
9998 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9999 for (j=0; j<info.channels; j++) {
10000 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10001 out[info.outOffset[j]] <<= 8;
10004 out += info.outJump;
10007 else if (info.inFormat == RTAUDIO_SINT32) {
10008 // Channel compensation and/or (de)interleaving only.
10009 Int32 *in = (Int32 *)inBuffer;
10010 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10011 for (j=0; j<info.channels; j++) {
10012 out[info.outOffset[j]] = in[info.inOffset[j]];
10015 out += info.outJump;
10018 else if (info.inFormat == RTAUDIO_FLOAT32) {
10019 Float32 *in = (Float32 *)inBuffer;
10020 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10021 for (j=0; j<info.channels; j++) {
10022 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10025 out += info.outJump;
10028 else if (info.inFormat == RTAUDIO_FLOAT64) {
10029 Float64 *in = (Float64 *)inBuffer;
10030 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10031 for (j=0; j<info.channels; j++) {
10032 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10035 out += info.outJump;
10039 else if (info.outFormat == RTAUDIO_SINT24) {
10040 Int24 *out = (Int24 *)outBuffer;
10041 if (info.inFormat == RTAUDIO_SINT8) {
10042 signed char *in = (signed char *)inBuffer;
10043 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10044 for (j=0; j<info.channels; j++) {
10045 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10046 //out[info.outOffset[j]] <<= 16;
10049 out += info.outJump;
10052 else if (info.inFormat == RTAUDIO_SINT16) {
10053 Int16 *in = (Int16 *)inBuffer;
10054 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10055 for (j=0; j<info.channels; j++) {
10056 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10057 //out[info.outOffset[j]] <<= 8;
10060 out += info.outJump;
10063 else if (info.inFormat == RTAUDIO_SINT24) {
10064 // Channel compensation and/or (de)interleaving only.
10065 Int24 *in = (Int24 *)inBuffer;
10066 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10067 for (j=0; j<info.channels; j++) {
10068 out[info.outOffset[j]] = in[info.inOffset[j]];
10071 out += info.outJump;
10074 else if (info.inFormat == RTAUDIO_SINT32) {
10075 Int32 *in = (Int32 *)inBuffer;
10076 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10077 for (j=0; j<info.channels; j++) {
10078 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10079 //out[info.outOffset[j]] >>= 8;
10082 out += info.outJump;
10085 else if (info.inFormat == RTAUDIO_FLOAT32) {
10086 Float32 *in = (Float32 *)inBuffer;
10087 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10088 for (j=0; j<info.channels; j++) {
10089 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10092 out += info.outJump;
10095 else if (info.inFormat == RTAUDIO_FLOAT64) {
10096 Float64 *in = (Float64 *)inBuffer;
10097 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10098 for (j=0; j<info.channels; j++) {
10099 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10102 out += info.outJump;
10106 else if (info.outFormat == RTAUDIO_SINT16) {
10107 Int16 *out = (Int16 *)outBuffer;
10108 if (info.inFormat == RTAUDIO_SINT8) {
10109 signed char *in = (signed char *)inBuffer;
10110 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10111 for (j=0; j<info.channels; j++) {
10112 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10113 out[info.outOffset[j]] <<= 8;
10116 out += info.outJump;
10119 else if (info.inFormat == RTAUDIO_SINT16) {
10120 // Channel compensation and/or (de)interleaving only.
10121 Int16 *in = (Int16 *)inBuffer;
10122 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10123 for (j=0; j<info.channels; j++) {
10124 out[info.outOffset[j]] = in[info.inOffset[j]];
10127 out += info.outJump;
10130 else if (info.inFormat == RTAUDIO_SINT24) {
10131 Int24 *in = (Int24 *)inBuffer;
10132 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10133 for (j=0; j<info.channels; j++) {
10134 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10137 out += info.outJump;
10140 else if (info.inFormat == RTAUDIO_SINT32) {
10141 Int32 *in = (Int32 *)inBuffer;
10142 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10143 for (j=0; j<info.channels; j++) {
10144 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10147 out += info.outJump;
10150 else if (info.inFormat == RTAUDIO_FLOAT32) {
10151 Float32 *in = (Float32 *)inBuffer;
10152 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10153 for (j=0; j<info.channels; j++) {
10154 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10157 out += info.outJump;
10160 else if (info.inFormat == RTAUDIO_FLOAT64) {
10161 Float64 *in = (Float64 *)inBuffer;
10162 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10163 for (j=0; j<info.channels; j++) {
10164 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10167 out += info.outJump;
10171 else if (info.outFormat == RTAUDIO_SINT8) {
10172 signed char *out = (signed char *)outBuffer;
10173 if (info.inFormat == RTAUDIO_SINT8) {
10174 // Channel compensation and/or (de)interleaving only.
10175 signed char *in = (signed char *)inBuffer;
10176 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10177 for (j=0; j<info.channels; j++) {
10178 out[info.outOffset[j]] = in[info.inOffset[j]];
10181 out += info.outJump;
10184 if (info.inFormat == RTAUDIO_SINT16) {
10185 Int16 *in = (Int16 *)inBuffer;
10186 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10187 for (j=0; j<info.channels; j++) {
10188 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10191 out += info.outJump;
10194 else if (info.inFormat == RTAUDIO_SINT24) {
10195 Int24 *in = (Int24 *)inBuffer;
10196 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10197 for (j=0; j<info.channels; j++) {
10198 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10201 out += info.outJump;
10204 else if (info.inFormat == RTAUDIO_SINT32) {
10205 Int32 *in = (Int32 *)inBuffer;
10206 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10207 for (j=0; j<info.channels; j++) {
10208 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10211 out += info.outJump;
10214 else if (info.inFormat == RTAUDIO_FLOAT32) {
10215 Float32 *in = (Float32 *)inBuffer;
10216 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10217 for (j=0; j<info.channels; j++) {
10218 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10221 out += info.outJump;
10224 else if (info.inFormat == RTAUDIO_FLOAT64) {
10225 Float64 *in = (Float64 *)inBuffer;
10226 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10227 for (j=0; j<info.channels; j++) {
10228 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10231 out += info.outJump;
10237 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10238 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10239 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10241 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10247 if ( format == RTAUDIO_SINT16 ) {
10248 for ( unsigned int i=0; i<samples; i++ ) {
10249 // Swap 1st and 2nd bytes.
10254 // Increment 2 bytes.
10258 else if ( format == RTAUDIO_SINT32 ||
10259 format == RTAUDIO_FLOAT32 ) {
10260 for ( unsigned int i=0; i<samples; i++ ) {
10261 // Swap 1st and 4th bytes.
10266 // Swap 2nd and 3rd bytes.
10272 // Increment 3 more bytes.
10276 else if ( format == RTAUDIO_SINT24 ) {
10277 for ( unsigned int i=0; i<samples; i++ ) {
10278 // Swap 1st and 3rd bytes.
10283 // Increment 2 more bytes.
10287 else if ( format == RTAUDIO_FLOAT64 ) {
10288 for ( unsigned int i=0; i<samples; i++ ) {
10289 // Swap 1st and 8th bytes
10294 // Swap 2nd and 7th bytes
10300 // Swap 3rd and 6th bytes
10306 // Swap 4th and 5th bytes
10312 // Increment 5 more bytes.
10318 // Indentation settings for Vim and Emacs
10320 // Local Variables:
10321 // c-basic-offset: 2
10322 // indent-tabs-mode: nil
10325 // vim: et sts=2 sw=2