1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: Win32 critical sections on Windows,
// POSIX pthread mutexes on Linux/Mac, or harmless dummies when no
// audio API is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

// Pass-through for narrow (already multi-byte) strings.
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}

// Convert a wide (UTF-16) string to a UTF-8 encoded std::string.
static std::string convertCharPointerToStdString(const wchar_t *text)
{
  // First call computes the required buffer size (includes the terminating NUL).
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  std::string s( length-1, '\0' );
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
  return s;
}

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum (UNSPECIFIED, LINUX_ALSA,
// LINUX_PULSE, LINUX_OSS, UNIX_JACK, MACOSX_CORE, WINDOWS_WASAPI,
// WINDOWS_ASIO, WINDOWS_DS, RTAUDIO_DUMMY).
extern "C" {
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa"        , "ALSA" },
  { "pulse"       , "Pulse" },
  { "oss"         , "OpenSoundSystem" },
  { "jack"        , "Jack" },
  { "core"        , "CoreAudio" },
  { "wasapi"      , "WASAPI" },
  { "asio"        , "ASIO" },
  { "ds"          , "DirectSound" },
  { "dummy"       , "Dummy" },
};
// Number of rows above; checked against RtAudio::NUM_APIS below.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
}
120 // The order here will control the order of RtAudio's API search in
122 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
123 #if defined(__UNIX_JACK__)
126 #if defined(__LINUX_PULSE__)
127 RtAudio::LINUX_PULSE,
129 #if defined(__LINUX_ALSA__)
132 #if defined(__LINUX_OSS__)
135 #if defined(__WINDOWS_ASIO__)
136 RtAudio::WINDOWS_ASIO,
138 #if defined(__WINDOWS_WASAPI__)
139 RtAudio::WINDOWS_WASAPI,
141 #if defined(__WINDOWS_DS__)
144 #if defined(__MACOSX_CORE__)
145 RtAudio::MACOSX_CORE,
147 #if defined(__RTAUDIO_DUMMY__)
148 RtAudio::RTAUDIO_DUMMY,
150 RtAudio::UNSPECIFIED,
152 extern "C" const unsigned int rtaudio_num_compiled_apis =
153 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
156 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
157 // If the build breaks here, check that they match.
158 template<bool b> class StaticAssert { private: StaticAssert() {} };
159 template<> class StaticAssert<true>{ public: StaticAssert() {} };
160 class StaticAssertions { StaticAssertions() {
161 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
164 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
166 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
167 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
170 std::string RtAudio :: getApiName( RtAudio::Api api )
172 if (api < 0 || api >= RtAudio::NUM_APIS)
174 return rtaudio_api_names[api][0];
177 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
179 if (api < 0 || api >= RtAudio::NUM_APIS)
181 return rtaudio_api_names[api][1];
184 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
187 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
188 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
189 return rtaudio_compiled_apis[i];
190 return RtAudio::UNSPECIFIED;
193 void RtAudio :: openRtApi( RtAudio::Api api )
199 #if defined(__UNIX_JACK__)
200 if ( api == UNIX_JACK )
201 rtapi_ = new RtApiJack();
203 #if defined(__LINUX_ALSA__)
204 if ( api == LINUX_ALSA )
205 rtapi_ = new RtApiAlsa();
207 #if defined(__LINUX_PULSE__)
208 if ( api == LINUX_PULSE )
209 rtapi_ = new RtApiPulse();
211 #if defined(__LINUX_OSS__)
212 if ( api == LINUX_OSS )
213 rtapi_ = new RtApiOss();
215 #if defined(__WINDOWS_ASIO__)
216 if ( api == WINDOWS_ASIO )
217 rtapi_ = new RtApiAsio();
219 #if defined(__WINDOWS_WASAPI__)
220 if ( api == WINDOWS_WASAPI )
221 rtapi_ = new RtApiWasapi();
223 #if defined(__WINDOWS_DS__)
224 if ( api == WINDOWS_DS )
225 rtapi_ = new RtApiDs();
227 #if defined(__MACOSX_CORE__)
228 if ( api == MACOSX_CORE )
229 rtapi_ = new RtApiCore();
231 #if defined(__RTAUDIO_DUMMY__)
232 if ( api == RTAUDIO_DUMMY )
233 rtapi_ = new RtApiDummy();
237 RtAudio :: RtAudio( RtAudio::Api api )
241 if ( api != UNSPECIFIED ) {
242 // Attempt to open the specified API.
244 if ( rtapi_ ) return;
246 // No compiled support for specified API value. Issue a debug
247 // warning and continue as if no API was specified.
248 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
251 // Iterate through the compiled APIs and return as soon as we find
252 // one with at least one device or we reach the end of the list.
253 std::vector< RtAudio::Api > apis;
254 getCompiledApi( apis );
255 for ( unsigned int i=0; i<apis.size(); i++ ) {
256 openRtApi( apis[i] );
257 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
260 if ( rtapi_ ) return;
262 // It should not be possible to get here because the preprocessor
263 // definition __RTAUDIO_DUMMY__ is automatically defined if no
264 // API-specific definitions are passed to the compiler. But just in
265 // case something weird happens, we'll thow an error.
266 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
267 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
270 RtAudio :: ~RtAudio()
276 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
277 RtAudio::StreamParameters *inputParameters,
278 RtAudioFormat format, unsigned int sampleRate,
279 unsigned int *bufferFrames,
280 RtAudioCallback callback, void *userData,
281 RtAudio::StreamOptions *options,
282 RtAudioErrorCallback errorCallback )
284 return rtapi_->openStream( outputParameters, inputParameters, format,
285 sampleRate, bufferFrames, callback,
286 userData, options, errorCallback );
289 // *************************************************** //
291 // Public RtApi definitions (see end of file for
292 // private or protected utility functions).
294 // *************************************************** //
298 stream_.state = STREAM_CLOSED;
299 stream_.mode = UNINITIALIZED;
300 stream_.apiHandle = 0;
301 stream_.userBuffer[0] = 0;
302 stream_.userBuffer[1] = 0;
303 MUTEX_INITIALIZE( &stream_.mutex );
304 showWarnings_ = true;
305 firstErrorOccurred_ = false;
310 MUTEX_DESTROY( &stream_.mutex );
313 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
314 RtAudio::StreamParameters *iParams,
315 RtAudioFormat format, unsigned int sampleRate,
316 unsigned int *bufferFrames,
317 RtAudioCallback callback, void *userData,
318 RtAudio::StreamOptions *options,
319 RtAudioErrorCallback errorCallback )
321 if ( stream_.state != STREAM_CLOSED ) {
322 errorText_ = "RtApi::openStream: a stream is already open!";
323 error( RtAudioError::INVALID_USE );
327 // Clear stream information potentially left from a previously open stream.
330 if ( oParams && oParams->nChannels < 1 ) {
331 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
332 error( RtAudioError::INVALID_USE );
336 if ( iParams && iParams->nChannels < 1 ) {
337 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
338 error( RtAudioError::INVALID_USE );
342 if ( oParams == NULL && iParams == NULL ) {
343 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
344 error( RtAudioError::INVALID_USE );
348 if ( formatBytes(format) == 0 ) {
349 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
350 error( RtAudioError::INVALID_USE );
354 unsigned int nDevices = getDeviceCount();
355 unsigned int oChannels = 0;
357 oChannels = oParams->nChannels;
358 if ( oParams->deviceId >= nDevices ) {
359 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
360 error( RtAudioError::INVALID_USE );
365 unsigned int iChannels = 0;
367 iChannels = iParams->nChannels;
368 if ( iParams->deviceId >= nDevices ) {
369 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
370 error( RtAudioError::INVALID_USE );
377 if ( oChannels > 0 ) {
379 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
380 sampleRate, format, bufferFrames, options );
381 if ( result == false ) {
382 error( RtAudioError::SYSTEM_ERROR );
387 if ( iChannels > 0 ) {
389 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
390 sampleRate, format, bufferFrames, options );
391 if ( result == false ) {
392 if ( oChannels > 0 ) closeStream();
393 error( RtAudioError::SYSTEM_ERROR );
398 stream_.callbackInfo.callback = (void *) callback;
399 stream_.callbackInfo.userData = userData;
400 stream_.callbackInfo.errorCallback = (void *) errorCallback;
402 if ( options ) options->numberOfBuffers = stream_.nBuffers;
403 stream_.state = STREAM_STOPPED;
406 unsigned int RtApi :: getDefaultInputDevice( void )
408 // Should be implemented in subclasses if possible.
412 unsigned int RtApi :: getDefaultOutputDevice( void )
414 // Should be implemented in subclasses if possible.
418 void RtApi :: closeStream( void )
420 // MUST be implemented in subclasses!
424 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
425 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
426 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
427 RtAudio::StreamOptions * /*options*/ )
429 // MUST be implemented in subclasses!
433 void RtApi :: tickStreamTime( void )
435 // Subclasses that do not provide their own implementation of
436 // getStreamTime should call this function once per buffer I/O to
437 // provide basic stream time support.
439 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
441 #if defined( HAVE_GETTIMEOFDAY )
442 gettimeofday( &stream_.lastTickTimestamp, NULL );
446 long RtApi :: getStreamLatency( void )
450 long totalLatency = 0;
451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
452 totalLatency = stream_.latency[0];
453 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
454 totalLatency += stream_.latency[1];
459 double RtApi :: getStreamTime( void )
463 #if defined( HAVE_GETTIMEOFDAY )
464 // Return a very accurate estimate of the stream time by
465 // adding in the elapsed time since the last tick.
469 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
470 return stream_.streamTime;
472 gettimeofday( &now, NULL );
473 then = stream_.lastTickTimestamp;
474 return stream_.streamTime +
475 ((now.tv_sec + 0.000001 * now.tv_usec) -
476 (then.tv_sec + 0.000001 * then.tv_usec));
478 return stream_.streamTime;
482 void RtApi :: setStreamTime( double time )
487 stream_.streamTime = time;
488 #if defined( HAVE_GETTIMEOFDAY )
489 gettimeofday( &stream_.lastTickTimestamp, NULL );
493 unsigned int RtApi :: getStreamSampleRate( void )
497 return stream_.sampleRate;
501 // *************************************************** //
503 // OS/API-specific methods.
505 // *************************************************** //
507 #if defined(__MACOSX_CORE__)
509 // The OS X CoreAudio API is designed to use a separate callback
510 // procedure for each of its audio devices. A single RtAudio duplex
511 // stream using two different devices is supported here, though it
512 // cannot be guaranteed to always behave correctly because we cannot
513 // synchronize these two callbacks.
515 // A property listener is installed for over/underrun information.
516 // However, no functionality is currently provided to allow property
517 // listeners to trigger user handlers because it is unclear what could
518 // be done if a critical stream parameter (buffer size, sample rate,
519 // device disconnect) notification arrived. The listeners entail
520 // quite a bit of extra code and most likely, a user program wouldn't
521 // be prepared for the result anyway. However, we do provide a flag
522 // to the client callback function to inform of an over/underrun.
524 // A structure to hold various information related to the CoreAudio API
527 AudioDeviceID id[2]; // device ids
528 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
529 AudioDeviceIOProcID procId[2];
531 UInt32 iStream[2]; // device stream index (or first if using multiple)
532 UInt32 nStreams[2]; // number of streams to use
535 pthread_cond_t condition;
536 int drainCounter; // Tracks callback counts when draining
537 bool internalDrain; // Indicates if stop is initiated from callback or not.
540 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
543 RtApiCore:: RtApiCore()
545 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
546 // This is a largely undocumented but absolutely necessary
547 // requirement starting with OS-X 10.6. If not called, queries and
548 // updates to various audio device properties are not handled
550 CFRunLoopRef theRunLoop = NULL;
551 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
552 kAudioObjectPropertyScopeGlobal,
553 kAudioObjectPropertyElementMaster };
554 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
555 if ( result != noErr ) {
556 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
557 error( RtAudioError::WARNING );
562 RtApiCore :: ~RtApiCore()
564 // The subclass destructor gets called before the base class
565 // destructor, so close an existing stream before deallocating
566 // apiDeviceId memory.
567 if ( stream_.state != STREAM_CLOSED ) closeStream();
570 unsigned int RtApiCore :: getDeviceCount( void )
572 // Find out how many audio devices there are, if any.
574 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
575 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
576 if ( result != noErr ) {
577 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
578 error( RtAudioError::WARNING );
582 return dataSize / sizeof( AudioDeviceID );
585 unsigned int RtApiCore :: getDefaultInputDevice( void )
587 unsigned int nDevices = getDeviceCount();
588 if ( nDevices <= 1 ) return 0;
591 UInt32 dataSize = sizeof( AudioDeviceID );
592 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
593 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
594 if ( result != noErr ) {
595 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
596 error( RtAudioError::WARNING );
600 dataSize *= nDevices;
601 AudioDeviceID deviceList[ nDevices ];
602 property.mSelector = kAudioHardwarePropertyDevices;
603 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
604 if ( result != noErr ) {
605 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
606 error( RtAudioError::WARNING );
610 for ( unsigned int i=0; i<nDevices; i++ )
611 if ( id == deviceList[i] ) return i;
613 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
614 error( RtAudioError::WARNING );
618 unsigned int RtApiCore :: getDefaultOutputDevice( void )
620 unsigned int nDevices = getDeviceCount();
621 if ( nDevices <= 1 ) return 0;
624 UInt32 dataSize = sizeof( AudioDeviceID );
625 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
626 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
627 if ( result != noErr ) {
628 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
629 error( RtAudioError::WARNING );
633 dataSize = sizeof( AudioDeviceID ) * nDevices;
634 AudioDeviceID deviceList[ nDevices ];
635 property.mSelector = kAudioHardwarePropertyDevices;
636 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
637 if ( result != noErr ) {
638 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
639 error( RtAudioError::WARNING );
643 for ( unsigned int i=0; i<nDevices; i++ )
644 if ( id == deviceList[i] ) return i;
646 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
647 error( RtAudioError::WARNING );
651 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
653 RtAudio::DeviceInfo info;
657 unsigned int nDevices = getDeviceCount();
658 if ( nDevices == 0 ) {
659 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
660 error( RtAudioError::INVALID_USE );
664 if ( device >= nDevices ) {
665 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
666 error( RtAudioError::INVALID_USE );
670 AudioDeviceID deviceList[ nDevices ];
671 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
672 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
673 kAudioObjectPropertyScopeGlobal,
674 kAudioObjectPropertyElementMaster };
675 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
676 0, NULL, &dataSize, (void *) &deviceList );
677 if ( result != noErr ) {
678 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
679 error( RtAudioError::WARNING );
683 AudioDeviceID id = deviceList[ device ];
685 // Get the device name.
688 dataSize = sizeof( CFStringRef );
689 property.mSelector = kAudioObjectPropertyManufacturer;
690 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
691 if ( result != noErr ) {
692 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
693 errorText_ = errorStream_.str();
694 error( RtAudioError::WARNING );
698 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
699 int length = CFStringGetLength(cfname);
700 char *mname = (char *)malloc(length * 3 + 1);
701 #if defined( UNICODE ) || defined( _UNICODE )
702 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
704 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
706 info.name.append( (const char *)mname, strlen(mname) );
707 info.name.append( ": " );
711 property.mSelector = kAudioObjectPropertyName;
712 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
713 if ( result != noErr ) {
714 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
715 errorText_ = errorStream_.str();
716 error( RtAudioError::WARNING );
720 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
721 length = CFStringGetLength(cfname);
722 char *name = (char *)malloc(length * 3 + 1);
723 #if defined( UNICODE ) || defined( _UNICODE )
724 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
726 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
728 info.name.append( (const char *)name, strlen(name) );
732 // Get the output stream "configuration".
733 AudioBufferList *bufferList = nil;
734 property.mSelector = kAudioDevicePropertyStreamConfiguration;
735 property.mScope = kAudioDevicePropertyScopeOutput;
736 // property.mElement = kAudioObjectPropertyElementWildcard;
738 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
739 if ( result != noErr || dataSize == 0 ) {
740 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
741 errorText_ = errorStream_.str();
742 error( RtAudioError::WARNING );
746 // Allocate the AudioBufferList.
747 bufferList = (AudioBufferList *) malloc( dataSize );
748 if ( bufferList == NULL ) {
749 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
750 error( RtAudioError::WARNING );
754 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
755 if ( result != noErr || dataSize == 0 ) {
757 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
758 errorText_ = errorStream_.str();
759 error( RtAudioError::WARNING );
763 // Get output channel information.
764 unsigned int i, nStreams = bufferList->mNumberBuffers;
765 for ( i=0; i<nStreams; i++ )
766 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
769 // Get the input stream "configuration".
770 property.mScope = kAudioDevicePropertyScopeInput;
771 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
772 if ( result != noErr || dataSize == 0 ) {
773 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
774 errorText_ = errorStream_.str();
775 error( RtAudioError::WARNING );
779 // Allocate the AudioBufferList.
780 bufferList = (AudioBufferList *) malloc( dataSize );
781 if ( bufferList == NULL ) {
782 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
783 error( RtAudioError::WARNING );
787 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
788 if (result != noErr || dataSize == 0) {
790 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
791 errorText_ = errorStream_.str();
792 error( RtAudioError::WARNING );
796 // Get input channel information.
797 nStreams = bufferList->mNumberBuffers;
798 for ( i=0; i<nStreams; i++ )
799 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
802 // If device opens for both playback and capture, we determine the channels.
803 if ( info.outputChannels > 0 && info.inputChannels > 0 )
804 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
806 // Probe the device sample rates.
807 bool isInput = false;
808 if ( info.outputChannels == 0 ) isInput = true;
810 // Determine the supported sample rates.
811 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
812 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
813 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
814 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
815 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
822 AudioValueRange rangeList[ nRanges ];
823 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
824 if ( result != kAudioHardwareNoError ) {
825 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
826 errorText_ = errorStream_.str();
827 error( RtAudioError::WARNING );
831 // The sample rate reporting mechanism is a bit of a mystery. It
832 // seems that it can either return individual rates or a range of
833 // rates. I assume that if the min / max range values are the same,
834 // then that represents a single supported rate and if the min / max
835 // range values are different, the device supports an arbitrary
836 // range of values (though there might be multiple ranges, so we'll
837 // use the most conservative range).
838 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
839 bool haveValueRange = false;
840 info.sampleRates.clear();
841 for ( UInt32 i=0; i<nRanges; i++ ) {
842 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
843 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
844 info.sampleRates.push_back( tmpSr );
846 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
847 info.preferredSampleRate = tmpSr;
850 haveValueRange = true;
851 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
852 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
856 if ( haveValueRange ) {
857 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
858 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
859 info.sampleRates.push_back( SAMPLE_RATES[k] );
861 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
862 info.preferredSampleRate = SAMPLE_RATES[k];
867 // Sort and remove any redundant values
868 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
869 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
871 if ( info.sampleRates.size() == 0 ) {
872 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
873 errorText_ = errorStream_.str();
874 error( RtAudioError::WARNING );
878 // CoreAudio always uses 32-bit floating point data for PCM streams.
879 // Thus, any other "physical" formats supported by the device are of
880 // no interest to the client.
881 info.nativeFormats = RTAUDIO_FLOAT32;
883 if ( info.outputChannels > 0 )
884 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
885 if ( info.inputChannels > 0 )
886 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
892 static OSStatus callbackHandler( AudioDeviceID inDevice,
893 const AudioTimeStamp* /*inNow*/,
894 const AudioBufferList* inInputData,
895 const AudioTimeStamp* /*inInputTime*/,
896 AudioBufferList* outOutputData,
897 const AudioTimeStamp* /*inOutputTime*/,
900 CallbackInfo *info = (CallbackInfo *) infoPointer;
902 RtApiCore *object = (RtApiCore *) info->object;
903 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
904 return kAudioHardwareUnspecifiedError;
906 return kAudioHardwareNoError;
909 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
911 const AudioObjectPropertyAddress properties[],
912 void* handlePointer )
914 CoreHandle *handle = (CoreHandle *) handlePointer;
915 for ( UInt32 i=0; i<nAddresses; i++ ) {
916 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
917 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
918 handle->xrun[1] = true;
920 handle->xrun[0] = true;
924 return kAudioHardwareNoError;
927 static OSStatus rateListener( AudioObjectID inDevice,
928 UInt32 /*nAddresses*/,
929 const AudioObjectPropertyAddress /*properties*/[],
932 Float64 *rate = (Float64 *) ratePointer;
933 UInt32 dataSize = sizeof( Float64 );
934 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
935 kAudioObjectPropertyScopeGlobal,
936 kAudioObjectPropertyElementMaster };
937 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
938 return kAudioHardwareNoError;
// Open and configure one direction (OUTPUT or INPUT) of a CoreAudio
// stream on the given device index: maps the index to an AudioDeviceID,
// selects the device stream(s) that cover the requested channels,
// negotiates buffer size, sample rate and virtual/physical formats,
// allocates user/device buffers, and registers the IOProc plus an
// overload (xrun) listener.  Returns SUCCESS or FAILURE.
// NOTE(review): this extraction has elided lines (gaps in the embedded
// numbering), so some error-return paths and closing braces are not
// visible here.
941 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
942 unsigned int firstChannel, unsigned int sampleRate,
943 RtAudioFormat format, unsigned int *bufferSize,
944 RtAudio::StreamOptions *options )
947 unsigned int nDevices = getDeviceCount();
948 if ( nDevices == 0 ) {
949 // This should not happen because a check is made before this function is called.
950 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
954 if ( device >= nDevices ) {
955 // This should not happen because a check is made before this function is called.
956 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Fetch the full system device list so the caller's index can be
// translated into an AudioDeviceID.
960 AudioDeviceID deviceList[ nDevices ];
961 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
962 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
963 kAudioObjectPropertyScopeGlobal,
964 kAudioObjectPropertyElementMaster };
965 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
966 0, NULL, &dataSize, (void *) &deviceList );
967 if ( result != noErr ) {
968 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
972 AudioDeviceID id = deviceList[ device ];
974 // Setup for stream mode.
975 bool isInput = false;
976 if ( mode == INPUT ) {
978 property.mScope = kAudioDevicePropertyScopeInput;
981 property.mScope = kAudioDevicePropertyScopeOutput;
983 // Get the stream "configuration".
984 AudioBufferList *bufferList = nil;
986 property.mSelector = kAudioDevicePropertyStreamConfiguration;
987 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
988 if ( result != noErr || dataSize == 0 ) {
989 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
990 errorText_ = errorStream_.str();
994 // Allocate the AudioBufferList.
995 bufferList = (AudioBufferList *) malloc( dataSize );
996 if ( bufferList == NULL ) {
997 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1001 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1002 if (result != noErr || dataSize == 0) {
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1005 errorText_ = errorStream_.str();
1009 // Search for one or more streams that contain the desired number of
1010 // channels. CoreAudio devices can have an arbitrary number of
1011 // streams and each stream can have an arbitrary number of channels.
1012 // For each stream, a single buffer of interleaved samples is
1013 // provided. RtAudio prefers the use of one stream of interleaved
1014 // data or multiple consecutive single-channel streams. However, we
1015 // now support multiple consecutive multi-channel streams of
1016 // interleaved data as well.
// offsetCounter tracks how many leading device channels remain to be
// skipped before reaching firstChannel.
1017 UInt32 iStream, offsetCounter = firstChannel;
1018 UInt32 nStreams = bufferList->mNumberBuffers;
1019 bool monoMode = false;
1020 bool foundStream = false;
1022 // First check that the device supports the requested number of
1024 UInt32 deviceChannels = 0;
1025 for ( iStream=0; iStream<nStreams; iStream++ )
1026 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1028 if ( deviceChannels < ( channels + firstChannel ) ) {
1030 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1031 errorText_ = errorStream_.str();
1035 // Look for a single stream meeting our needs.
1036 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1037 for ( iStream=0; iStream<nStreams; iStream++ ) {
1038 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1039 if ( streamChannels >= channels + offsetCounter ) {
1040 firstStream = iStream;
1041 channelOffset = offsetCounter;
1045 if ( streamChannels > offsetCounter ) break;
1046 offsetCounter -= streamChannels;
1049 // If we didn't find a single stream above, then we should be able
1050 // to meet the channel specification with multiple streams.
1051 if ( foundStream == false ) {
1053 offsetCounter = firstChannel;
1054 for ( iStream=0; iStream<nStreams; iStream++ ) {
1055 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1056 if ( streamChannels > offsetCounter ) break;
1057 offsetCounter -= streamChannels;
1060 firstStream = iStream;
1061 channelOffset = offsetCounter;
1062 Int32 channelCounter = channels + offsetCounter - streamChannels;
1064 if ( streamChannels > 1 ) monoMode = false;
1065 while ( channelCounter > 0 ) {
1066 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1067 if ( streamChannels > 1 ) monoMode = false;
1068 channelCounter -= streamChannels;
1075 // Determine the buffer size.
1076 AudioValueRange bufferRange;
1077 dataSize = sizeof( AudioValueRange );
1078 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1079 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1081 if ( result != noErr ) {
1082 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1083 errorText_ = errorStream_.str();
// Clamp the requested buffer size into the device's supported range.
1087 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1088 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1089 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1091 // Set the buffer size. For multiple streams, I'm assuming we only
1092 // need to make this setting for the master channel.
1093 UInt32 theSize = (UInt32) *bufferSize;
1094 dataSize = sizeof( UInt32 );
1095 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1096 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1098 if ( result != noErr ) {
1099 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1100 errorText_ = errorStream_.str();
1104 // If attempting to setup a duplex stream, the bufferSize parameter
1105 // MUST be the same in both directions!
1106 *bufferSize = theSize;
1107 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1108 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1109 errorText_ = errorStream_.str();
1113 stream_.bufferSize = *bufferSize;
1114 stream_.nBuffers = 1;
1116 // Try to set "hog" mode ... it's not clear to me this is working.
1117 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1119 dataSize = sizeof( hog_pid );
1120 property.mSelector = kAudioDevicePropertyHogMode;
1121 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1122 if ( result != noErr ) {
1123 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1124 errorText_ = errorStream_.str();
// Only take exclusive access if another process currently holds it.
1128 if ( hog_pid != getpid() ) {
1130 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1131 if ( result != noErr ) {
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1133 errorText_ = errorStream_.str();
1139 // Check and if necessary, change the sample rate for the device.
1140 Float64 nominalRate;
1141 dataSize = sizeof( Float64 );
1142 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1143 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1144 if ( result != noErr ) {
1145 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1146 errorText_ = errorStream_.str();
1150 // Only change the sample rate if off by more than 1 Hz.
1151 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1153 // Set a property listener for the sample rate change
1154 Float64 reportedRate = 0.0;
1155 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1156 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1157 if ( result != noErr ) {
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1159 errorText_ = errorStream_.str();
1163 nominalRate = (Float64) sampleRate;
1164 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1165 if ( result != noErr ) {
1166 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1167 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1168 errorText_ = errorStream_.str();
1172 // Now wait until the reported nominal rate is what we just set.
// Polls via the rateListener callback, giving up after ~5 seconds.
1173 UInt32 microCounter = 0;
1174 while ( reportedRate != nominalRate ) {
1175 microCounter += 5000;
1176 if ( microCounter > 5000000 ) break;
1180 // Remove the property listener.
1181 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1183 if ( microCounter > 5000000 ) {
1184 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1190 // Now set the stream format for all streams. Also, check the
1191 // physical format of the device and change that if necessary.
1192 AudioStreamBasicDescription description;
1193 dataSize = sizeof( AudioStreamBasicDescription );
1194 property.mSelector = kAudioStreamPropertyVirtualFormat;
1195 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1196 if ( result != noErr ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1202 // Set the sample rate and data format id. However, only make the
1203 // change if the sample rate is not within 1.0 of the desired
1204 // rate and the format is not linear pcm.
1205 bool updateFormat = false;
1206 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1207 description.mSampleRate = (Float64) sampleRate;
1208 updateFormat = true;
1211 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1212 description.mFormatID = kAudioFormatLinearPCM;
1213 updateFormat = true;
1216 if ( updateFormat ) {
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1218 if ( result != noErr ) {
1219 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1220 errorText_ = errorStream_.str();
1225 // Now check the physical format.
1226 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1230 errorText_ = errorStream_.str();
1234 //std::cout << "Current physical stream format:" << std::endl;
1235 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1236 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1237 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1238 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// If the physical format is not PCM with at least 16 bits, probe a
// prioritized list of candidate formats until one is accepted.
1240 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1241 description.mFormatID = kAudioFormatLinearPCM;
1242 //description.mSampleRate = (Float64) sampleRate;
1243 AudioStreamBasicDescription testDescription = description;
1246 // We'll try higher bit rates first and then work our way down.
1247 std::vector< std::pair<UInt32, UInt32>  > physicalFormats;
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1253 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1255 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1261 bool setPhysicalFormat = false;
1262 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1263 testDescription = description;
1264 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1265 testDescription.mFormatFlags = physicalFormats[i].second;
// The 24.2/24.4 entries encode "24 bits carried in 4 bytes"; they
// truncate to 24 here, so unpacked 24-bit uses a 4-byte frame slot.
1266 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1267 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1271 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1272 if ( result == noErr ) {
1273 setPhysicalFormat = true;
1274 //std::cout << "Updated physical stream format:" << std::endl;
1275 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1276 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1277 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1278 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1283 if ( !setPhysicalFormat ) {
1284 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1285 errorText_ = errorStream_.str();
1288 } // done setting virtual/physical formats.
1290 // Get the stream / device latency.
1292 dataSize = sizeof( UInt32 );
1293 property.mSelector = kAudioDevicePropertyLatency;
1294 if ( AudioObjectHasProperty( id, &property ) == true ) {
1295 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1296 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1300 error( RtAudioError::WARNING );
1304 // Byte-swapping: According to AudioHardware.h, the stream data will
1305 // always be presented in native-endian format, so we should never
1306 // need to byte swap.
1307 stream_.doByteSwap[mode] = false;
1309 // From the CoreAudio documentation, PCM data must be supplied as
// 32-bit floats (continuation of this note appears elided here), hence
// the fixed RTAUDIO_FLOAT32 device format below.
1311 stream_.userFormat = format;
1312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1314 if ( streamCount == 1 )
1315 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1316 else // multiple streams
1317 stream_.nDeviceChannels[mode] = channels;
1318 stream_.nUserChannels[mode] = channels;
1319 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1320 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1321 else stream_.userInterleaved = true;
1322 stream_.deviceInterleaved[mode] = true;
1323 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1325 // Set flags for buffer conversion.
1326 stream_.doConvertBuffer[mode] = false;
1327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( streamCount == 1 ) {
1332 if ( stream_.nUserChannels[mode] > 1 &&
1333 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1334 stream_.doConvertBuffer[mode] = true;
1336 else if ( monoMode && stream_.userInterleaved )
1337 stream_.doConvertBuffer[mode] = true;
1339 // Allocate our CoreHandle structure for the stream.
1340 CoreHandle *handle = 0;
1341 if ( stream_.apiHandle == 0 ) {
1343 handle = new CoreHandle;
1345 catch ( std::bad_alloc& ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1350 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1354 stream_.apiHandle = (void *) handle;
1357 handle = (CoreHandle *) stream_.apiHandle;
1358 handle->iStream[mode] = firstStream;
1359 handle->nStreams[mode] = streamCount;
1360 handle->id[mode] = id;
1362 // Allocate necessary internal buffers.
1363 unsigned long bufferBytes;
1364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1365 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1367 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers".  However, we can't do this if using multiple
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// In duplex, reuse the existing device buffer when it is already large
// enough for this direction.
1380 if ( mode == INPUT ) {
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure per device.
1411 stream_.mode = DUPLEX;
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1427 stream_.mode = mode;
1430 // Setup the device property listener for over/underload.
1431 property.mSelector = kAudioDeviceProcessorOverload;
1432 property.mScope = kAudioObjectPropertyScopeGlobal;
1433 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Cleanup path on failure: release the condition variable, handle,
// user/device buffers, and mark the stream closed.
1439 pthread_cond_destroy( &handle->condition );
1441 stream_.apiHandle = 0;
1444 for ( int i=0; i<2; i++ ) {
1445 if ( stream_.userBuffer[i] ) {
1446 free( stream_.userBuffer[i] );
1447 stream_.userBuffer[i] = 0;
1451 if ( stream_.deviceBuffer ) {
1452 free( stream_.deviceBuffer );
1453 stream_.deviceBuffer = 0;
1456 stream_.state = STREAM_CLOSED;
1460 void RtApiCore :: closeStream( void )
1462 if ( stream_.state == STREAM_CLOSED ) {
1463 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1464 error( RtAudioError::WARNING );
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1471 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1472 kAudioObjectPropertyScopeGlobal,
1473 kAudioObjectPropertyElementMaster };
1475 property.mSelector = kAudioDeviceProcessorOverload;
1476 property.mScope = kAudioObjectPropertyScopeGlobal;
1477 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1478 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1479 error( RtAudioError::WARNING );
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1484 AudioDeviceStop( handle->id[0], handle->procId[0] );
1485 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 #else // deprecated behaviour
1487 AudioDeviceStop( handle->id[0], callbackHandler );
1488 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1493 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1495 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1496 kAudioObjectPropertyScopeGlobal,
1497 kAudioObjectPropertyElementMaster };
1499 property.mSelector = kAudioDeviceProcessorOverload;
1500 property.mScope = kAudioObjectPropertyScopeGlobal;
1501 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1502 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1503 error( RtAudioError::WARNING );
1506 if ( stream_.state == STREAM_RUNNING ) {
1507 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1508 AudioDeviceStop( handle->id[1], handle->procId[1] );
1509 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1510 #else // deprecated behaviour
1511 AudioDeviceStop( handle->id[1], callbackHandler );
1512 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1517 for ( int i=0; i<2; i++ ) {
1518 if ( stream_.userBuffer[i] ) {
1519 free( stream_.userBuffer[i] );
1520 stream_.userBuffer[i] = 0;
1524 if ( stream_.deviceBuffer ) {
1525 free( stream_.deviceBuffer );
1526 stream_.deviceBuffer = 0;
1529 // Destroy pthread condition variable.
1530 pthread_cond_destroy( &handle->condition );
1532 stream_.apiHandle = 0;
1534 stream_.mode = UNINITIALIZED;
1535 stream_.state = STREAM_CLOSED;
1538 void RtApiCore :: startStream( void )
1541 if ( stream_.state == STREAM_RUNNING ) {
1542 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1543 error( RtAudioError::WARNING );
1547 #if defined( HAVE_GETTIMEOFDAY )
1548 gettimeofday( &stream_.lastTickTimestamp, NULL );
1551 OSStatus result = noErr;
1552 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1553 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1555 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1556 result = AudioDeviceStart( handle->id[0], handle->procId[0] );
1557 #else // deprecated behaviour
1558 result = AudioDeviceStart( handle->id[0], callbackHandler );
1560 if ( result != noErr ) {
1561 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1562 errorText_ = errorStream_.str();
1567 if ( stream_.mode == INPUT ||
1568 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1570 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1571 result = AudioDeviceStart( handle->id[1], handle->procId[1] );
1572 #else // deprecated behaviour
1573 result = AudioDeviceStart( handle->id[1], callbackHandler );
1575 if ( result != noErr ) {
1576 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1577 errorText_ = errorStream_.str();
1582 handle->drainCounter = 0;
1583 handle->internalDrain = false;
1584 stream_.state = STREAM_RUNNING;
1587 if ( result == noErr ) return;
1588 error( RtAudioError::SYSTEM_ERROR );
1591 void RtApiCore :: stopStream( void )
1594 if ( stream_.state == STREAM_STOPPED ) {
1595 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1596 error( RtAudioError::WARNING );
1600 OSStatus result = noErr;
1601 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1602 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1604 if ( handle->drainCounter == 0 ) {
1605 handle->drainCounter = 2;
1606 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1609 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1610 result = AudioDeviceStop( handle->id[0], handle->procId[0] );
1611 #else // deprecated behaviour
1612 result = AudioDeviceStop( handle->id[0], callbackHandler );
1614 if ( result != noErr ) {
1615 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1616 errorText_ = errorStream_.str();
1621 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1623 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1624 result = AudioDeviceStop( handle->id[0], handle->procId[1] );
1625 #else // deprecated behaviour
1626 result = AudioDeviceStop( handle->id[1], callbackHandler );
1628 if ( result != noErr ) {
1629 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1630 errorText_ = errorStream_.str();
1635 stream_.state = STREAM_STOPPED;
1638 if ( result == noErr ) return;
1639 error( RtAudioError::SYSTEM_ERROR );
1642 void RtApiCore :: abortStream( void )
1645 if ( stream_.state == STREAM_STOPPED ) {
1646 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1647 error( RtAudioError::WARNING );
1651 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1652 handle->drainCounter = 2;
1657 // This function will be called by a spawned thread when the user
1658 // callback function signals that the stream should be stopped or
1659 // aborted. It is better to handle it this way because the
1660 // callbackEvent() function probably should return before the AudioDeviceStop()
1661 // function is called.
1662 static void *coreStopStream( void *ptr )
1664 CallbackInfo *info = (CallbackInfo *) ptr;
1665 RtApiCore *object = (RtApiCore *) info->object;
1667 object->stopStream();
1668 pthread_exit( NULL );
1671 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1672 const AudioBufferList *inBufferList,
1673 const AudioBufferList *outBufferList )
1675 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1676 if ( stream_.state == STREAM_CLOSED ) {
1677 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1678 error( RtAudioError::WARNING );
1682 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1683 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1685 // Check if we were draining the stream and signal is finished.
1686 if ( handle->drainCounter > 3 ) {
1687 ThreadHandle threadId;
1689 stream_.state = STREAM_STOPPING;
1690 if ( handle->internalDrain == true )
1691 pthread_create( &threadId, NULL, coreStopStream, info );
1692 else // external call to stopStream()
1693 pthread_cond_signal( &handle->condition );
1697 AudioDeviceID outputDevice = handle->id[0];
1699 // Invoke user callback to get fresh output data UNLESS we are
1700 // draining stream or duplex mode AND the input/output devices are
1701 // different AND this function is called for the input device.
1702 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1703 RtAudioCallback callback = (RtAudioCallback) info->callback;
1704 double streamTime = getStreamTime();
1705 RtAudioStreamStatus status = 0;
1706 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1707 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1708 handle->xrun[0] = false;
1710 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1711 status |= RTAUDIO_INPUT_OVERFLOW;
1712 handle->xrun[1] = false;
1715 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1716 stream_.bufferSize, streamTime, status, info->userData );
1717 if ( cbReturnValue == 2 ) {
1718 stream_.state = STREAM_STOPPING;
1719 handle->drainCounter = 2;
1723 else if ( cbReturnValue == 1 ) {
1724 handle->drainCounter = 1;
1725 handle->internalDrain = true;
1729 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1731 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1733 if ( handle->nStreams[0] == 1 ) {
1734 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1736 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1738 else { // fill multiple streams with zeros
1739 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1740 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1742 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1746 else if ( handle->nStreams[0] == 1 ) {
1747 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1748 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1749 stream_.userBuffer[0], stream_.convertInfo[0] );
1751 else { // copy from user buffer
1752 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1753 stream_.userBuffer[0],
1754 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1757 else { // fill multiple streams
1758 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1759 if ( stream_.doConvertBuffer[0] ) {
1760 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1761 inBuffer = (Float32 *) stream_.deviceBuffer;
1764 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1765 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1766 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1767 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1768 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1771 else { // fill multiple multi-channel streams with interleaved data
1772 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1775 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1776 UInt32 inChannels = stream_.nUserChannels[0];
1777 if ( stream_.doConvertBuffer[0] ) {
1778 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1779 inChannels = stream_.nDeviceChannels[0];
1782 if ( inInterleaved ) inOffset = 1;
1783 else inOffset = stream_.bufferSize;
1785 channelsLeft = inChannels;
1786 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1788 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1789 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1792 // Account for possible channel offset in first stream
1793 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1794 streamChannels -= stream_.channelOffset[0];
1795 outJump = stream_.channelOffset[0];
1799 // Account for possible unfilled channels at end of the last stream
1800 if ( streamChannels > channelsLeft ) {
1801 outJump = streamChannels - channelsLeft;
1802 streamChannels = channelsLeft;
1805 // Determine input buffer offsets and skips
1806 if ( inInterleaved ) {
1807 inJump = inChannels;
1808 in += inChannels - channelsLeft;
1812 in += (inChannels - channelsLeft) * inOffset;
1815 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1816 for ( unsigned int j=0; j<streamChannels; j++ ) {
1817 *out++ = in[j*inOffset];
1822 channelsLeft -= streamChannels;
1828 // Don't bother draining input
1829 if ( handle->drainCounter ) {
1830 handle->drainCounter++;
1834 AudioDeviceID inputDevice;
1835 inputDevice = handle->id[1];
1836 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1838 if ( handle->nStreams[1] == 1 ) {
1839 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1840 convertBuffer( stream_.userBuffer[1],
1841 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1842 stream_.convertInfo[1] );
1844 else { // copy to user buffer
1845 memcpy( stream_.userBuffer[1],
1846 inBufferList->mBuffers[handle->iStream[1]].mData,
1847 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1850 else { // read from multiple streams
1851 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1852 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1854 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1855 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1856 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1857 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1858 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1861 else { // read from multiple multi-channel streams
1862 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1865 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1866 UInt32 outChannels = stream_.nUserChannels[1];
1867 if ( stream_.doConvertBuffer[1] ) {
1868 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1869 outChannels = stream_.nDeviceChannels[1];
1872 if ( outInterleaved ) outOffset = 1;
1873 else outOffset = stream_.bufferSize;
1875 channelsLeft = outChannels;
1876 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1878 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1879 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1882 // Account for possible channel offset in first stream
1883 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1884 streamChannels -= stream_.channelOffset[1];
1885 inJump = stream_.channelOffset[1];
1889 // Account for possible unread channels at end of the last stream
1890 if ( streamChannels > channelsLeft ) {
1891 inJump = streamChannels - channelsLeft;
1892 streamChannels = channelsLeft;
1895 // Determine output buffer offsets and skips
1896 if ( outInterleaved ) {
1897 outJump = outChannels;
1898 out += outChannels - channelsLeft;
1902 out += (outChannels - channelsLeft) * outOffset;
1905 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1906 for ( unsigned int j=0; j<streamChannels; j++ ) {
1907 out[j*outOffset] = *in++;
1912 channelsLeft -= streamChannels;
1916 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1917 convertBuffer( stream_.userBuffer[1],
1918 stream_.deviceBuffer,
1919 stream_.convertInfo[1] );
1925 //MUTEX_UNLOCK( &stream_.mutex );
1927 // Make sure to only tick duplex stream time once if using two devices
1928 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1929 RtApi::tickStreamTime();
1934 const char* RtApiCore :: getErrorCode( OSStatus code )
1938 case kAudioHardwareNotRunningError:
1939 return "kAudioHardwareNotRunningError";
1941 case kAudioHardwareUnspecifiedError:
1942 return "kAudioHardwareUnspecifiedError";
1944 case kAudioHardwareUnknownPropertyError:
1945 return "kAudioHardwareUnknownPropertyError";
1947 case kAudioHardwareBadPropertySizeError:
1948 return "kAudioHardwareBadPropertySizeError";
1950 case kAudioHardwareIllegalOperationError:
1951 return "kAudioHardwareIllegalOperationError";
1953 case kAudioHardwareBadObjectError:
1954 return "kAudioHardwareBadObjectError";
1956 case kAudioHardwareBadDeviceError:
1957 return "kAudioHardwareBadDeviceError";
1959 case kAudioHardwareBadStreamError:
1960 return "kAudioHardwareBadStreamError";
1962 case kAudioHardwareUnsupportedOperationError:
1963 return "kAudioHardwareUnsupportedOperationError";
1965 case kAudioDeviceUnsupportedFormatError:
1966 return "kAudioDeviceUnsupportedFormatError";
1968 case kAudioDevicePermissionsError:
1969 return "kAudioDevicePermissionsError";
1972 return "CoreAudio unknown error";
1976 //******************** End of __MACOSX_CORE__ *********************//
1979 #if defined(__UNIX_JACK__)
1981 // JACK is a low-latency audio server, originally written for the
1982 // GNU/Linux operating system and now also ported to OS-X. It can
1983 // connect a number of different applications to an audio device, as
1984 // well as allowing them to share audio between themselves.
1986 // When using JACK with RtAudio, "devices" refer to JACK clients that
1987 // have ports connected to the server. The JACK server is typically
1988 // started in a terminal as follows:
1990 // .jackd -d alsa -d hw:0
1992 // or through an interface program such as qjackctl. Many of the
1993 // parameters normally set for a stream are fixed by the JACK server
1994 // and can be specified when the JACK server is started. In
1997 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1999 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2000 // frames, and number of buffers = 4. Once the server is running, it
2001 // is not possible to override these values. If the values are not
2002 // specified in the command-line, the JACK server uses default values.
2004 // The JACK server does not have to be running when an instance of
2005 // RtApiJack is created, though the function getDeviceCount() will
2006 // report 0 devices found until JACK has been started. When no
2007 // devices are available (i.e., the JACK server is not running), a
2008 // stream cannot be opened.
2010 #include <jack/jack.h>
2014 // A structure to hold various information related to the Jack API
2017 jack_client_t *client;
2018 jack_port_t **ports[2];
2019 std::string deviceName[2];
2021 pthread_cond_t condition;
2022 int drainCounter; // Tracks callback counts when draining
2023 bool internalDrain; // Indicates if stop is initiated from callback or not.
2026 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op handler installed via jack_set_error_function() to silence
// Jack's internal error printing in non-debug builds.
static void jackSilentError( const char * ) {};
#endif
2033 RtApiJack :: RtApiJack()
2034 :shouldAutoconnect_(true) {
2035 // Nothing to do here.
2036 #if !defined(__RTAUDIO_DEBUG__)
2037 // Turn off Jack's internal error reporting.
2038 jack_set_error_function( &jackSilentError );
2042 RtApiJack :: ~RtApiJack()
2044 if ( stream_.state != STREAM_CLOSED ) closeStream();
2047 unsigned int RtApiJack :: getDeviceCount( void )
2049 // See if we can become a jack client.
2050 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2051 jack_status_t *status = NULL;
2052 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2053 if ( client == 0 ) return 0;
2056 std::string port, previousPort;
2057 unsigned int nChannels = 0, nDevices = 0;
2058 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2060 // Parse the port names up to the first colon (:).
2063 port = (char *) ports[ nChannels ];
2064 iColon = port.find(":");
2065 if ( iColon != std::string::npos ) {
2066 port = port.substr( 0, iColon + 1 );
2067 if ( port != previousPort ) {
2069 previousPort = port;
2072 } while ( ports[++nChannels] );
2076 jack_client_close( client );
2080 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2082 RtAudio::DeviceInfo info;
2083 info.probed = false;
2085 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2086 jack_status_t *status = NULL;
2087 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2088 if ( client == 0 ) {
2089 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2090 error( RtAudioError::WARNING );
2095 std::string port, previousPort;
2096 unsigned int nPorts = 0, nDevices = 0;
2097 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2099 // Parse the port names up to the first colon (:).
2102 port = (char *) ports[ nPorts ];
2103 iColon = port.find(":");
2104 if ( iColon != std::string::npos ) {
2105 port = port.substr( 0, iColon );
2106 if ( port != previousPort ) {
2107 if ( nDevices == device ) info.name = port;
2109 previousPort = port;
2112 } while ( ports[++nPorts] );
2116 if ( device >= nDevices ) {
2117 jack_client_close( client );
2118 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2119 error( RtAudioError::INVALID_USE );
2123 // Get the current jack server sample rate.
2124 info.sampleRates.clear();
2126 info.preferredSampleRate = jack_get_sample_rate( client );
2127 info.sampleRates.push_back( info.preferredSampleRate );
2129 // Count the available ports containing the client name as device
2130 // channels. Jack "input ports" equal RtAudio output channels.
2131 unsigned int nChannels = 0;
2132 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2134 while ( ports[ nChannels ] ) nChannels++;
2136 info.outputChannels = nChannels;
2139 // Jack "output ports" equal RtAudio input channels.
2141 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2143 while ( ports[ nChannels ] ) nChannels++;
2145 info.inputChannels = nChannels;
2148 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2149 jack_client_close(client);
2150 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2151 error( RtAudioError::WARNING );
2155 // If device opens for both playback and capture, we determine the channels.
2156 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2157 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2159 // Jack always uses 32-bit floats.
2160 info.nativeFormats = RTAUDIO_FLOAT32;
2162 // Jack doesn't provide default devices so we'll use the first available one.
2163 if ( device == 0 && info.outputChannels > 0 )
2164 info.isDefaultOutput = true;
2165 if ( device == 0 && info.inputChannels > 0 )
2166 info.isDefaultInput = true;
2168 jack_client_close(client);
2173 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2175 CallbackInfo *info = (CallbackInfo *) infoPointer;
2177 RtApiJack *object = (RtApiJack *) info->object;
2178 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2183 // This function will be called by a spawned thread when the Jack
2184 // server signals that it is shutting down. It is necessary to handle
2185 // it this way because the jackShutdown() function must return before
2186 // the jack_deactivate() function (in closeStream()) will return.
2187 static void *jackCloseStream( void *ptr )
2189 CallbackInfo *info = (CallbackInfo *) ptr;
2190 RtApiJack *object = (RtApiJack *) info->object;
2192 object->closeStream();
2194 pthread_exit( NULL );
2196 static void jackShutdown( void *infoPointer )
2198 CallbackInfo *info = (CallbackInfo *) infoPointer;
2199 RtApiJack *object = (RtApiJack *) info->object;
2201 // Check current stream state. If stopped, then we'll assume this
2202 // was called as a result of a call to RtApiJack::stopStream (the
2203 // deactivation of a client handle causes this function to be called).
2204 // If not, we'll assume the Jack server is shutting down or some
2205 // other problem occurred and we should close the stream.
2206 if ( object->isStreamRunning() == false ) return;
2208 ThreadHandle threadId;
2209 pthread_create( &threadId, NULL, jackCloseStream, info );
2210 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2213 static int jackXrun( void *infoPointer )
2215 JackHandle *handle = *((JackHandle **) infoPointer);
2217 if ( handle->ports[0] ) handle->xrun[0] = true;
2218 if ( handle->ports[1] ) handle->xrun[1] = true;
2223 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2224 unsigned int firstChannel, unsigned int sampleRate,
2225 RtAudioFormat format, unsigned int *bufferSize,
2226 RtAudio::StreamOptions *options )
2228 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2230 // Look for jack server and try to become a client (only do once per stream).
2231 jack_client_t *client = 0;
2232 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2233 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2234 jack_status_t *status = NULL;
2235 if ( options && !options->streamName.empty() )
2236 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2238 client = jack_client_open( "RtApiJack", jackoptions, status );
2239 if ( client == 0 ) {
2240 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2241 error( RtAudioError::WARNING );
2246 // The handle must have been created on an earlier pass.
2247 client = handle->client;
2251 std::string port, previousPort, deviceName;
2252 unsigned int nPorts = 0, nDevices = 0;
2253 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2255 // Parse the port names up to the first colon (:).
2258 port = (char *) ports[ nPorts ];
2259 iColon = port.find(":");
2260 if ( iColon != std::string::npos ) {
2261 port = port.substr( 0, iColon );
2262 if ( port != previousPort ) {
2263 if ( nDevices == device ) deviceName = port;
2265 previousPort = port;
2268 } while ( ports[++nPorts] );
2272 if ( device >= nDevices ) {
2273 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2277 unsigned long flag = JackPortIsInput;
2278 if ( mode == INPUT ) flag = JackPortIsOutput;
2280 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2281 // Count the available ports containing the client name as device
2282 // channels. Jack "input ports" equal RtAudio output channels.
2283 unsigned int nChannels = 0;
2284 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2286 while ( ports[ nChannels ] ) nChannels++;
2289 // Compare the jack ports for specified client to the requested number of channels.
2290 if ( nChannels < (channels + firstChannel) ) {
2291 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2292 errorText_ = errorStream_.str();
2297 // Check the jack server sample rate.
2298 unsigned int jackRate = jack_get_sample_rate( client );
2299 if ( sampleRate != jackRate ) {
2300 jack_client_close( client );
2301 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2302 errorText_ = errorStream_.str();
2305 stream_.sampleRate = jackRate;
2307 // Get the latency of the JACK port.
2308 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2309 if ( ports[ firstChannel ] ) {
2311 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2312 // the range (usually the min and max are equal)
2313 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2314 // get the latency range
2315 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2316 // be optimistic, use the min!
2317 stream_.latency[mode] = latrange.min;
2318 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2322 // The jack server always uses 32-bit floating-point data.
2323 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2324 stream_.userFormat = format;
2326 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2327 else stream_.userInterleaved = true;
2329 // Jack always uses non-interleaved buffers.
2330 stream_.deviceInterleaved[mode] = false;
2332 // Jack always provides host byte-ordered data.
2333 stream_.doByteSwap[mode] = false;
2335 // Get the buffer size. The buffer size and number of buffers
2336 // (periods) is set when the jack server is started.
2337 stream_.bufferSize = (int) jack_get_buffer_size( client );
2338 *bufferSize = stream_.bufferSize;
2340 stream_.nDeviceChannels[mode] = channels;
2341 stream_.nUserChannels[mode] = channels;
2343 // Set flags for buffer conversion.
2344 stream_.doConvertBuffer[mode] = false;
2345 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2346 stream_.doConvertBuffer[mode] = true;
2347 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2348 stream_.nUserChannels[mode] > 1 )
2349 stream_.doConvertBuffer[mode] = true;
2351 // Allocate our JackHandle structure for the stream.
2352 if ( handle == 0 ) {
2354 handle = new JackHandle;
2356 catch ( std::bad_alloc& ) {
2357 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2361 if ( pthread_cond_init(&handle->condition, NULL) ) {
2362 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2365 stream_.apiHandle = (void *) handle;
2366 handle->client = client;
2368 handle->deviceName[mode] = deviceName;
2370 // Allocate necessary internal buffers.
2371 unsigned long bufferBytes;
2372 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2373 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2374 if ( stream_.userBuffer[mode] == NULL ) {
2375 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2379 if ( stream_.doConvertBuffer[mode] ) {
2381 bool makeBuffer = true;
2382 if ( mode == OUTPUT )
2383 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2384 else { // mode == INPUT
2385 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2386 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2387 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2388 if ( bufferBytes < bytesOut ) makeBuffer = false;
2393 bufferBytes *= *bufferSize;
2394 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2395 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2396 if ( stream_.deviceBuffer == NULL ) {
2397 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2403 // Allocate memory for the Jack ports (channels) identifiers.
2404 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2405 if ( handle->ports[mode] == NULL ) {
2406 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2410 stream_.device[mode] = device;
2411 stream_.channelOffset[mode] = firstChannel;
2412 stream_.state = STREAM_STOPPED;
2413 stream_.callbackInfo.object = (void *) this;
2415 if ( stream_.mode == OUTPUT && mode == INPUT )
2416 // We had already set up the stream for output.
2417 stream_.mode = DUPLEX;
2419 stream_.mode = mode;
2420 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2421 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2422 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2425 // Register our ports.
2427 if ( mode == OUTPUT ) {
2428 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2429 snprintf( label, 64, "outport %d", i );
2430 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2431 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2435 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2436 snprintf( label, 64, "inport %d", i );
2437 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2438 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2442 // Setup the buffer conversion information structure. We don't use
2443 // buffers to do channel offsets, so we override that parameter
2445 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2447 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2453 pthread_cond_destroy( &handle->condition );
2454 jack_client_close( handle->client );
2456 if ( handle->ports[0] ) free( handle->ports[0] );
2457 if ( handle->ports[1] ) free( handle->ports[1] );
2460 stream_.apiHandle = 0;
2463 for ( int i=0; i<2; i++ ) {
2464 if ( stream_.userBuffer[i] ) {
2465 free( stream_.userBuffer[i] );
2466 stream_.userBuffer[i] = 0;
2470 if ( stream_.deviceBuffer ) {
2471 free( stream_.deviceBuffer );
2472 stream_.deviceBuffer = 0;
2478 void RtApiJack :: closeStream( void )
2480 if ( stream_.state == STREAM_CLOSED ) {
2481 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2482 error( RtAudioError::WARNING );
2486 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2489 if ( stream_.state == STREAM_RUNNING )
2490 jack_deactivate( handle->client );
2492 jack_client_close( handle->client );
2496 if ( handle->ports[0] ) free( handle->ports[0] );
2497 if ( handle->ports[1] ) free( handle->ports[1] );
2498 pthread_cond_destroy( &handle->condition );
2500 stream_.apiHandle = 0;
2503 for ( int i=0; i<2; i++ ) {
2504 if ( stream_.userBuffer[i] ) {
2505 free( stream_.userBuffer[i] );
2506 stream_.userBuffer[i] = 0;
2510 if ( stream_.deviceBuffer ) {
2511 free( stream_.deviceBuffer );
2512 stream_.deviceBuffer = 0;
2515 stream_.mode = UNINITIALIZED;
2516 stream_.state = STREAM_CLOSED;
2519 void RtApiJack :: startStream( void )
2522 if ( stream_.state == STREAM_RUNNING ) {
2523 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2524 error( RtAudioError::WARNING );
2528 #if defined( HAVE_GETTIMEOFDAY )
2529 gettimeofday( &stream_.lastTickTimestamp, NULL );
2532 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2533 int result = jack_activate( handle->client );
2535 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2541 // Get the list of available ports.
2542 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2544 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2545 if ( ports == NULL) {
2546 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2550 // Now make the port connections. Since RtAudio wasn't designed to
2551 // allow the user to select particular channels of a device, we'll
2552 // just open the first "nChannels" ports with offset.
2553 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2555 if ( ports[ stream_.channelOffset[0] + i ] )
2556 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2559 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2566 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2568 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2569 if ( ports == NULL) {
2570 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2574 // Now make the port connections. See note above.
2575 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2577 if ( ports[ stream_.channelOffset[1] + i ] )
2578 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2581 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2588 handle->drainCounter = 0;
2589 handle->internalDrain = false;
2590 stream_.state = STREAM_RUNNING;
2593 if ( result == 0 ) return;
2594 error( RtAudioError::SYSTEM_ERROR );
2597 void RtApiJack :: stopStream( void )
2600 if ( stream_.state == STREAM_STOPPED ) {
2601 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2602 error( RtAudioError::WARNING );
2606 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2607 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2609 if ( handle->drainCounter == 0 ) {
2610 handle->drainCounter = 2;
2611 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2615 jack_deactivate( handle->client );
2616 stream_.state = STREAM_STOPPED;
2619 void RtApiJack :: abortStream( void )
2622 if ( stream_.state == STREAM_STOPPED ) {
2623 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2624 error( RtAudioError::WARNING );
2628 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2629 handle->drainCounter = 2;
2634 // This function will be called by a spawned thread when the user
2635 // callback function signals that the stream should be stopped or
2636 // aborted. It is necessary to handle it this way because the
2637 // callbackEvent() function must return before the jack_deactivate()
2638 // function will return.
2639 static void *jackStopStream( void *ptr )
2641 CallbackInfo *info = (CallbackInfo *) ptr;
2642 RtApiJack *object = (RtApiJack *) info->object;
2644 object->stopStream();
2645 pthread_exit( NULL );
2648 bool RtApiJack :: callbackEvent( unsigned long nframes )
2650 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2651 if ( stream_.state == STREAM_CLOSED ) {
2652 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2653 error( RtAudioError::WARNING );
2656 if ( stream_.bufferSize != nframes ) {
2657 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2658 error( RtAudioError::WARNING );
2662 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2663 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2665 // Check if we were draining the stream and signal is finished.
2666 if ( handle->drainCounter > 3 ) {
2667 ThreadHandle threadId;
2669 stream_.state = STREAM_STOPPING;
2670 if ( handle->internalDrain == true )
2671 pthread_create( &threadId, NULL, jackStopStream, info );
2673 pthread_cond_signal( &handle->condition );
2677 // Invoke user callback first, to get fresh output data.
2678 if ( handle->drainCounter == 0 ) {
2679 RtAudioCallback callback = (RtAudioCallback) info->callback;
2680 double streamTime = getStreamTime();
2681 RtAudioStreamStatus status = 0;
2682 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2683 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2684 handle->xrun[0] = false;
2686 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2687 status |= RTAUDIO_INPUT_OVERFLOW;
2688 handle->xrun[1] = false;
2690 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2691 stream_.bufferSize, streamTime, status, info->userData );
2692 if ( cbReturnValue == 2 ) {
2693 stream_.state = STREAM_STOPPING;
2694 handle->drainCounter = 2;
2696 pthread_create( &id, NULL, jackStopStream, info );
2699 else if ( cbReturnValue == 1 ) {
2700 handle->drainCounter = 1;
2701 handle->internalDrain = true;
2705 jack_default_audio_sample_t *jackbuffer;
2706 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2707 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2709 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2711 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2712 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2713 memset( jackbuffer, 0, bufferBytes );
2717 else if ( stream_.doConvertBuffer[0] ) {
2719 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2721 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2722 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2723 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2726 else { // no buffer conversion
2727 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2728 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2729 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2734 // Don't bother draining input
2735 if ( handle->drainCounter ) {
2736 handle->drainCounter++;
2740 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2742 if ( stream_.doConvertBuffer[1] ) {
2743 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2744 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2745 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2747 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2749 else { // no buffer conversion
2750 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2751 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2752 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2758 RtApi::tickStreamTime();
2761 //******************** End of __UNIX_JACK__ *********************//
2764 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2766 // The ASIO API is designed around a callback scheme, so this
2767 // implementation is similar to that used for OS-X CoreAudio and Linux
2768 // Jack. The primary constraint with ASIO is that it only allows
2769 // access to a single driver at a time. Thus, it is not possible to
2770 // have more than one simultaneous RtAudio stream.
2772 // This implementation also requires a number of external ASIO files
2773 // and a few global variables. The ASIO callback scheme does not
2774 // allow for the passing of user data, so we must create a global
2775 // pointer to our callbackInfo structure.
2777 // On unix systems, we make use of a pthread condition variable.
2778 // Since there is no equivalent in Windows, I hacked something based
2779 // on information found in
2780 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2782 #include "asiosys.h"
2784 #include "iasiothiscallresolver.h"
2785 #include "asiodrivers.h"
2788 static AsioDrivers drivers;
2789 static ASIOCallbacks asioCallbacks;
2790 static ASIODriverInfo driverInfo;
2791 static CallbackInfo *asioCallbackInfo;
2792 static bool asioXRun;
2795 int drainCounter; // Tracks callback counts when draining
2796 bool internalDrain; // Indicates if stop is initiated from callback or not.
2797 ASIOBufferInfo *bufferInfos;
2801 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2804 // Function declarations (definitions at end of section)
2805 static const char* getAsioErrorString( ASIOError result );
2806 static void sampleRateChanged( ASIOSampleRate sRate );
2807 static long asioMessages( long selector, long value, void* message, double* opt );
2809 RtApiAsio :: RtApiAsio()
2811 // ASIO cannot run on a multi-threaded appartment. You can call
2812 // CoInitialize beforehand, but it must be for appartment threading
2813 // (in which case, CoInitilialize will return S_FALSE here).
2814 coInitialized_ = false;
2815 HRESULT hr = CoInitialize( NULL );
2817 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2818 error( RtAudioError::WARNING );
2820 coInitialized_ = true;
2822 drivers.removeCurrentDriver();
2823 driverInfo.asioVersion = 2;
2825 // See note in DirectSound implementation about GetDesktopWindow().
2826 driverInfo.sysRef = GetForegroundWindow();
2829 RtApiAsio :: ~RtApiAsio()
2831 if ( stream_.state != STREAM_CLOSED ) closeStream();
2832 if ( coInitialized_ ) CoUninitialize();
2835 unsigned int RtApiAsio :: getDeviceCount( void )
2837 return (unsigned int) drivers.asioGetNumDev();
2840 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2842 RtAudio::DeviceInfo info;
2843 info.probed = false;
2846 unsigned int nDevices = getDeviceCount();
2847 if ( nDevices == 0 ) {
2848 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2849 error( RtAudioError::INVALID_USE );
2853 if ( device >= nDevices ) {
2854 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2855 error( RtAudioError::INVALID_USE );
2859 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2860 if ( stream_.state != STREAM_CLOSED ) {
2861 if ( device >= devices_.size() ) {
2862 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2863 error( RtAudioError::WARNING );
2866 return devices_[ device ];
2869 char driverName[32];
2870 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2871 if ( result != ASE_OK ) {
2872 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2873 errorText_ = errorStream_.str();
2874 error( RtAudioError::WARNING );
2878 info.name = driverName;
2880 if ( !drivers.loadDriver( driverName ) ) {
2881 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2882 errorText_ = errorStream_.str();
2883 error( RtAudioError::WARNING );
2887 result = ASIOInit( &driverInfo );
2888 if ( result != ASE_OK ) {
2889 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2890 errorText_ = errorStream_.str();
2891 error( RtAudioError::WARNING );
2895 // Determine the device channel information.
2896 long inputChannels, outputChannels;
2897 result = ASIOGetChannels( &inputChannels, &outputChannels );
2898 if ( result != ASE_OK ) {
2899 drivers.removeCurrentDriver();
2900 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2901 errorText_ = errorStream_.str();
2902 error( RtAudioError::WARNING );
2906 info.outputChannels = outputChannels;
2907 info.inputChannels = inputChannels;
2908 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2909 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2911 // Determine the supported sample rates.
2912 info.sampleRates.clear();
2913 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2914 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2915 if ( result == ASE_OK ) {
2916 info.sampleRates.push_back( SAMPLE_RATES[i] );
2918 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2919 info.preferredSampleRate = SAMPLE_RATES[i];
2923 // Determine supported data types ... just check first channel and assume rest are the same.
2924 ASIOChannelInfo channelInfo;
2925 channelInfo.channel = 0;
2926 channelInfo.isInput = true;
2927 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2928 result = ASIOGetChannelInfo( &channelInfo );
2929 if ( result != ASE_OK ) {
2930 drivers.removeCurrentDriver();
2931 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2932 errorText_ = errorStream_.str();
2933 error( RtAudioError::WARNING );
2937 info.nativeFormats = 0;
2938 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2939 info.nativeFormats |= RTAUDIO_SINT16;
2940 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2941 info.nativeFormats |= RTAUDIO_SINT32;
2942 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2943 info.nativeFormats |= RTAUDIO_FLOAT32;
2944 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2945 info.nativeFormats |= RTAUDIO_FLOAT64;
2946 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2947 info.nativeFormats |= RTAUDIO_SINT24;
2949 if ( info.outputChannels > 0 )
2950 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2951 if ( info.inputChannels > 0 )
2952 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2955 drivers.removeCurrentDriver();
2959 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2961 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2962 object->callbackEvent( index );
2965 void RtApiAsio :: saveDeviceInfo( void )
2969 unsigned int nDevices = getDeviceCount();
2970 devices_.resize( nDevices );
2971 for ( unsigned int i=0; i<nDevices; i++ )
2972 devices_[i] = getDeviceInfo( i );
2975 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2976 unsigned int firstChannel, unsigned int sampleRate,
2977 RtAudioFormat format, unsigned int *bufferSize,
2978 RtAudio::StreamOptions *options )
2979 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2981 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2983 // For ASIO, a duplex stream MUST use the same driver.
2984 if ( isDuplexInput && stream_.device[0] != device ) {
2985 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2989 char driverName[32];
2990 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2991 if ( result != ASE_OK ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2993 errorText_ = errorStream_.str();
2997 // Only load the driver once for duplex stream.
2998 if ( !isDuplexInput ) {
2999 // The getDeviceInfo() function will not work when a stream is open
3000 // because ASIO does not allow multiple devices to run at the same
3001 // time. Thus, we'll probe the system before opening a stream and
3002 // save the results for use by getDeviceInfo().
3003 this->saveDeviceInfo();
3005 if ( !drivers.loadDriver( driverName ) ) {
3006 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3007 errorText_ = errorStream_.str();
3011 result = ASIOInit( &driverInfo );
3012 if ( result != ASE_OK ) {
3013 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3014 errorText_ = errorStream_.str();
3019 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3020 bool buffersAllocated = false;
3021 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3022 unsigned int nChannels;
3025 // Check the device channel count.
3026 long inputChannels, outputChannels;
3027 result = ASIOGetChannels( &inputChannels, &outputChannels );
3028 if ( result != ASE_OK ) {
3029 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3030 errorText_ = errorStream_.str();
3034 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3035 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3036 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3037 errorText_ = errorStream_.str();
3040 stream_.nDeviceChannels[mode] = channels;
3041 stream_.nUserChannels[mode] = channels;
3042 stream_.channelOffset[mode] = firstChannel;
3044 // Verify the sample rate is supported.
3045 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3046 if ( result != ASE_OK ) {
3047 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3048 errorText_ = errorStream_.str();
3052 // Get the current sample rate
3053 ASIOSampleRate currentRate;
3054 result = ASIOGetSampleRate( ¤tRate );
3055 if ( result != ASE_OK ) {
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3057 errorText_ = errorStream_.str();
3061 // Set the sample rate only if necessary
3062 if ( currentRate != sampleRate ) {
3063 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3064 if ( result != ASE_OK ) {
3065 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3066 errorText_ = errorStream_.str();
3071 // Determine the driver data type.
3072 ASIOChannelInfo channelInfo;
3073 channelInfo.channel = 0;
3074 if ( mode == OUTPUT ) channelInfo.isInput = false;
3075 else channelInfo.isInput = true;
3076 result = ASIOGetChannelInfo( &channelInfo );
3077 if ( result != ASE_OK ) {
3078 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3079 errorText_ = errorStream_.str();
3083 // Assuming WINDOWS host is always little-endian.
3084 stream_.doByteSwap[mode] = false;
3085 stream_.userFormat = format;
3086 stream_.deviceFormat[mode] = 0;
3087 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3088 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3089 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3091 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3092 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3093 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3095 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3096 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3097 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3099 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3100 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3101 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3103 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3104 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3105 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3108 if ( stream_.deviceFormat[mode] == 0 ) {
3109 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3110 errorText_ = errorStream_.str();
3114 // Set the buffer size. For a duplex stream, this will end up
3115 // setting the buffer size based on the input constraints, which
3117 long minSize, maxSize, preferSize, granularity;
3118 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3119 if ( result != ASE_OK ) {
3120 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3121 errorText_ = errorStream_.str();
3125 if ( isDuplexInput ) {
3126 // When this is the duplex input (output was opened before), then we have to use the same
3127 // buffersize as the output, because it might use the preferred buffer size, which most
3128 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3129 // So instead of throwing an error, make them equal. The caller uses the reference
3130 // to the "bufferSize" param as usual to set up processing buffers.
3132 *bufferSize = stream_.bufferSize;
3135 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3136 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3137 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3138 else if ( granularity == -1 ) {
3139 // Make sure bufferSize is a power of two.
3140 int log2_of_min_size = 0;
3141 int log2_of_max_size = 0;
3143 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3144 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3145 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3148 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3149 int min_delta_num = log2_of_min_size;
3151 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3152 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3153 if (current_delta < min_delta) {
3154 min_delta = current_delta;
3159 *bufferSize = ( (unsigned int)1 << min_delta_num );
3160 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3161 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3163 else if ( granularity != 0 ) {
3164 // Set to an even multiple of granularity, rounding up.
3165 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3170 // we don't use it anymore, see above!
3171 // Just left it here for the case...
3172 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3173 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3178 stream_.bufferSize = *bufferSize;
3179 stream_.nBuffers = 2;
3181 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3182 else stream_.userInterleaved = true;
3184 // ASIO always uses non-interleaved buffers.
3185 stream_.deviceInterleaved[mode] = false;
3187 // Allocate, if necessary, our AsioHandle structure for the stream.
3188 if ( handle == 0 ) {
3190 handle = new AsioHandle;
3192 catch ( std::bad_alloc& ) {
3193 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3196 handle->bufferInfos = 0;
3198 // Create a manual-reset event.
3199 handle->condition = CreateEvent( NULL, // no security
3200 TRUE, // manual-reset
3201 FALSE, // non-signaled initially
3203 stream_.apiHandle = (void *) handle;
3206 // Create the ASIO internal buffers. Since RtAudio sets up input
3207 // and output separately, we'll have to dispose of previously
3208 // created output buffers for a duplex stream.
3209 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3210 ASIODisposeBuffers();
3211 if ( handle->bufferInfos ) free( handle->bufferInfos );
3214 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3216 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3217 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3218 if ( handle->bufferInfos == NULL ) {
3219 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3220 errorText_ = errorStream_.str();
3224 ASIOBufferInfo *infos;
3225 infos = handle->bufferInfos;
3226 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3227 infos->isInput = ASIOFalse;
3228 infos->channelNum = i + stream_.channelOffset[0];
3229 infos->buffers[0] = infos->buffers[1] = 0;
3231 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3232 infos->isInput = ASIOTrue;
3233 infos->channelNum = i + stream_.channelOffset[1];
3234 infos->buffers[0] = infos->buffers[1] = 0;
3237 // prepare for callbacks
3238 stream_.sampleRate = sampleRate;
3239 stream_.device[mode] = device;
3240 stream_.mode = isDuplexInput ? DUPLEX : mode;
3242 // store this class instance before registering callbacks, that are going to use it
3243 asioCallbackInfo = &stream_.callbackInfo;
3244 stream_.callbackInfo.object = (void *) this;
3246 // Set up the ASIO callback structure and create the ASIO data buffers.
3247 asioCallbacks.bufferSwitch = &bufferSwitch;
3248 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3249 asioCallbacks.asioMessage = &asioMessages;
3250 asioCallbacks.bufferSwitchTimeInfo = NULL;
3251 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3252 if ( result != ASE_OK ) {
3253 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3254 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3255 // In that case, let's be naïve and try that instead.
3256 *bufferSize = preferSize;
3257 stream_.bufferSize = *bufferSize;
3258 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3261 if ( result != ASE_OK ) {
3262 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3263 errorText_ = errorStream_.str();
3266 buffersAllocated = true;
3267 stream_.state = STREAM_STOPPED;
3269 // Set flags for buffer conversion.
3270 stream_.doConvertBuffer[mode] = false;
3271 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3272 stream_.doConvertBuffer[mode] = true;
3273 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3274 stream_.nUserChannels[mode] > 1 )
3275 stream_.doConvertBuffer[mode] = true;
3277 // Allocate necessary internal buffers
3278 unsigned long bufferBytes;
3279 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3280 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3281 if ( stream_.userBuffer[mode] == NULL ) {
3282 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3286 if ( stream_.doConvertBuffer[mode] ) {
3288 bool makeBuffer = true;
3289 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3290 if ( isDuplexInput && stream_.deviceBuffer ) {
3291 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3292 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3296 bufferBytes *= *bufferSize;
3297 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3298 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3299 if ( stream_.deviceBuffer == NULL ) {
3300 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3306 // Determine device latencies
3307 long inputLatency, outputLatency;
3308 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3309 if ( result != ASE_OK ) {
3310 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3311 errorText_ = errorStream_.str();
3312 error( RtAudioError::WARNING); // warn but don't fail
3315 stream_.latency[0] = outputLatency;
3316 stream_.latency[1] = inputLatency;
3319 // Setup the buffer conversion information structure. We don't use
3320 // buffers to do channel offsets, so we override that parameter
3322 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3327 if ( !isDuplexInput ) {
3328 // the cleanup for error in the duplex input, is done by RtApi::openStream
3329 // So we clean up for single channel only
3331 if ( buffersAllocated )
3332 ASIODisposeBuffers();
3334 drivers.removeCurrentDriver();
3337 CloseHandle( handle->condition );
3338 if ( handle->bufferInfos )
3339 free( handle->bufferInfos );
3342 stream_.apiHandle = 0;
3346 if ( stream_.userBuffer[mode] ) {
3347 free( stream_.userBuffer[mode] );
3348 stream_.userBuffer[mode] = 0;
3351 if ( stream_.deviceBuffer ) {
3352 free( stream_.deviceBuffer );
3353 stream_.deviceBuffer = 0;
3358 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3360 void RtApiAsio :: closeStream()
3362 if ( stream_.state == STREAM_CLOSED ) {
3363 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3364 error( RtAudioError::WARNING );
3368 if ( stream_.state == STREAM_RUNNING ) {
3369 stream_.state = STREAM_STOPPED;
3372 ASIODisposeBuffers();
3373 drivers.removeCurrentDriver();
3375 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3377 CloseHandle( handle->condition );
3378 if ( handle->bufferInfos )
3379 free( handle->bufferInfos );
3381 stream_.apiHandle = 0;
3384 for ( int i=0; i<2; i++ ) {
3385 if ( stream_.userBuffer[i] ) {
3386 free( stream_.userBuffer[i] );
3387 stream_.userBuffer[i] = 0;
3391 if ( stream_.deviceBuffer ) {
3392 free( stream_.deviceBuffer );
3393 stream_.deviceBuffer = 0;
3396 stream_.mode = UNINITIALIZED;
3397 stream_.state = STREAM_CLOSED;
3400 bool stopThreadCalled = false;
3402 void RtApiAsio :: startStream()
3405 if ( stream_.state == STREAM_RUNNING ) {
3406 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3407 error( RtAudioError::WARNING );
3411 #if defined( HAVE_GETTIMEOFDAY )
3412 gettimeofday( &stream_.lastTickTimestamp, NULL );
3415 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3416 ASIOError result = ASIOStart();
3417 if ( result != ASE_OK ) {
3418 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3419 errorText_ = errorStream_.str();
3423 handle->drainCounter = 0;
3424 handle->internalDrain = false;
3425 ResetEvent( handle->condition );
3426 stream_.state = STREAM_RUNNING;
3430 stopThreadCalled = false;
3432 if ( result == ASE_OK ) return;
3433 error( RtAudioError::SYSTEM_ERROR );
3436 void RtApiAsio :: stopStream()
3439 if ( stream_.state == STREAM_STOPPED ) {
3440 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3441 error( RtAudioError::WARNING );
3445 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3446 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3447 if ( handle->drainCounter == 0 ) {
3448 handle->drainCounter = 2;
3449 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3453 stream_.state = STREAM_STOPPED;
3455 ASIOError result = ASIOStop();
3456 if ( result != ASE_OK ) {
3457 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3458 errorText_ = errorStream_.str();
3461 if ( result == ASE_OK ) return;
3462 error( RtAudioError::SYSTEM_ERROR );
3465 void RtApiAsio :: abortStream()
3468 if ( stream_.state == STREAM_STOPPED ) {
3469 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3470 error( RtAudioError::WARNING );
3474 // The following lines were commented-out because some behavior was
3475 // noted where the device buffers need to be zeroed to avoid
3476 // continuing sound, even when the device buffers are completely
3477 // disposed. So now, calling abort is the same as calling stop.
3478 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3479 // handle->drainCounter = 2;
3483 // This function will be called by a spawned thread when the user
3484 // callback function signals that the stream should be stopped or
3485 // aborted. It is necessary to handle it this way because the
3486 // callbackEvent() function must return before the ASIOStop()
3487 // function will return.
3488 static unsigned __stdcall asioStopStream( void *ptr )
3490 CallbackInfo *info = (CallbackInfo *) ptr;
3491 RtApiAsio *object = (RtApiAsio *) info->object;
3493 object->stopStream();
3498 bool RtApiAsio :: callbackEvent( long bufferIndex )
3500 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3501 if ( stream_.state == STREAM_CLOSED ) {
3502 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3503 error( RtAudioError::WARNING );
3507 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3508 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3510 // Check if we were draining the stream and signal if finished.
3511 if ( handle->drainCounter > 3 ) {
3513 stream_.state = STREAM_STOPPING;
3514 if ( handle->internalDrain == false )
3515 SetEvent( handle->condition );
3516 else { // spawn a thread to stop the stream
3518 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3519 &stream_.callbackInfo, 0, &threadId );
3524 // Invoke user callback to get fresh output data UNLESS we are
3526 if ( handle->drainCounter == 0 ) {
3527 RtAudioCallback callback = (RtAudioCallback) info->callback;
3528 double streamTime = getStreamTime();
3529 RtAudioStreamStatus status = 0;
3530 if ( stream_.mode != INPUT && asioXRun == true ) {
3531 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3534 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3535 status |= RTAUDIO_INPUT_OVERFLOW;
3538 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3539 stream_.bufferSize, streamTime, status, info->userData );
3540 if ( cbReturnValue == 2 ) {
3541 stream_.state = STREAM_STOPPING;
3542 handle->drainCounter = 2;
3544 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3545 &stream_.callbackInfo, 0, &threadId );
3548 else if ( cbReturnValue == 1 ) {
3549 handle->drainCounter = 1;
3550 handle->internalDrain = true;
3554 unsigned int nChannels, bufferBytes, i, j;
3555 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3556 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3558 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3560 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3562 for ( i=0, j=0; i<nChannels; i++ ) {
3563 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3564 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3568 else if ( stream_.doConvertBuffer[0] ) {
3570 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3571 if ( stream_.doByteSwap[0] )
3572 byteSwapBuffer( stream_.deviceBuffer,
3573 stream_.bufferSize * stream_.nDeviceChannels[0],
3574 stream_.deviceFormat[0] );
3576 for ( i=0, j=0; i<nChannels; i++ ) {
3577 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3578 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3579 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3585 if ( stream_.doByteSwap[0] )
3586 byteSwapBuffer( stream_.userBuffer[0],
3587 stream_.bufferSize * stream_.nUserChannels[0],
3588 stream_.userFormat );
3590 for ( i=0, j=0; i<nChannels; i++ ) {
3591 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3592 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3593 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3599 // Don't bother draining input
3600 if ( handle->drainCounter ) {
3601 handle->drainCounter++;
3605 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3607 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3609 if (stream_.doConvertBuffer[1]) {
3611 // Always interleave ASIO input data.
3612 for ( i=0, j=0; i<nChannels; i++ ) {
3613 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3614 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3615 handle->bufferInfos[i].buffers[bufferIndex],
3619 if ( stream_.doByteSwap[1] )
3620 byteSwapBuffer( stream_.deviceBuffer,
3621 stream_.bufferSize * stream_.nDeviceChannels[1],
3622 stream_.deviceFormat[1] );
3623 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3627 for ( i=0, j=0; i<nChannels; i++ ) {
3628 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3629 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3630 handle->bufferInfos[i].buffers[bufferIndex],
3635 if ( stream_.doByteSwap[1] )
3636 byteSwapBuffer( stream_.userBuffer[1],
3637 stream_.bufferSize * stream_.nUserChannels[1],
3638 stream_.userFormat );
3643 // The following call was suggested by Malte Clasen. While the API
3644 // documentation indicates it should not be required, some device
3645 // drivers apparently do not function correctly without it.
3648 RtApi::tickStreamTime();
3652 static void sampleRateChanged( ASIOSampleRate sRate )
3654 // The ASIO documentation says that this usually only happens during
3655 // external sync. Audio processing is not stopped by the driver,
3656 // actual sample rate might not have even changed, maybe only the
3657 // sample rate status of an AES/EBU or S/PDIF digital input at the
3660 RtApi *object = (RtApi *) asioCallbackInfo->object;
3662 object->stopStream();
3664 catch ( RtAudioError &exception ) {
3665 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3669 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3672 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3676 switch( selector ) {
3677 case kAsioSelectorSupported:
3678 if ( value == kAsioResetRequest
3679 || value == kAsioEngineVersion
3680 || value == kAsioResyncRequest
3681 || value == kAsioLatenciesChanged
3682 // The following three were added for ASIO 2.0, you don't
3683 // necessarily have to support them.
3684 || value == kAsioSupportsTimeInfo
3685 || value == kAsioSupportsTimeCode
3686 || value == kAsioSupportsInputMonitor)
3689 case kAsioResetRequest:
3690 // Defer the task and perform the reset of the driver during the
3691 // next "safe" situation. You cannot reset the driver right now,
3692 // as this code is called from the driver. Reset the driver is
3693 // done by completely destruct is. I.e. ASIOStop(),
3694 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3696 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3699 case kAsioResyncRequest:
3700 // This informs the application that the driver encountered some
3701 // non-fatal data loss. It is used for synchronization purposes
3702 // of different media. Added mainly to work around the Win16Mutex
3703 // problems in Windows 95/98 with the Windows Multimedia system,
3704 // which could lose data because the Mutex was held too long by
3705 // another thread. However a driver can issue it in other
3707 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3711 case kAsioLatenciesChanged:
3712 // This will inform the host application that the drivers were
3713 // latencies changed. Beware, it this does not mean that the
3714 // buffer sizes have changed! You might need to update internal
3716 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3719 case kAsioEngineVersion:
3720 // Return the supported ASIO version of the host application. If
3721 // a host application does not implement this selector, ASIO 1.0
3722 // is assumed by the driver.
3725 case kAsioSupportsTimeInfo:
3726 // Informs the driver whether the
3727 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3728 // For compatibility with ASIO 1.0 drivers the host application
3729 // should always support the "old" bufferSwitch method, too.
3732 case kAsioSupportsTimeCode:
3733 // Informs the driver whether application is interested in time
3734 // code info. If an application does not need to know about time
3735 // code, the driver has less work to do.
3742 static const char* getAsioErrorString( ASIOError result )
3750 static const Messages m[] =
3752 { ASE_NotPresent, "Hardware input or output is not present or available." },
3753 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3754 { ASE_InvalidParameter, "Invalid input parameter." },
3755 { ASE_InvalidMode, "Invalid mode." },
3756 { ASE_SPNotAdvancing, "Sample position not advancing." },
3757 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3758 { ASE_NoMemory, "Not enough memory to complete the request." }
3761 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3762 if ( m[i].value == result ) return m[i].message;
3764 return "Unknown error.";
3767 //******************** End of __WINDOWS_ASIO__ *********************//
3771 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3773 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3774 // - Introduces support for the Windows WASAPI API
3775 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3776 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3777 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3784 #include <mferror.h>
3786 #include <mftransform.h>
3787 #include <wmcodecdsp.h>
3789 #include <audioclient.h>
3791 #include <mmdeviceapi.h>
3792 #include <functiondiscoverykeys_devpkey.h>
3794 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3795 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3798 #ifndef MFSTARTUP_NOSOCKET
3799 #define MFSTARTUP_NOSOCKET 0x1
3803 #pragma comment( lib, "ksuser" )
3804 #pragma comment( lib, "mfplat.lib" )
3805 #pragma comment( lib, "mfuuid.lib" )
3806 #pragma comment( lib, "wmcodecdspuuid" )
3809 //=============================================================================
// Release a COM interface pointer and NULL it afterwards.
// The NULL guard makes the macro safe on pointers that were never acquired,
// and nulling prevents a later double-Release of the same interface.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
3818 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3820 //-----------------------------------------------------------------------------
3822 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3823 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3824 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3825 // provide intermediate storage for read / write synchronization.
3839 // sets the length of the internal ring buffer
3840 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3843 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3845 bufferSize_ = bufferSize;
3850 // attempt to push a buffer into the ring buffer at the current "in" index
3851 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3853 if ( !buffer || // incoming buffer is NULL
3854 bufferSize == 0 || // incoming buffer has no data
3855 bufferSize > bufferSize_ ) // incoming buffer too large
3860 unsigned int relOutIndex = outIndex_;
3861 unsigned int inIndexEnd = inIndex_ + bufferSize;
3862 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3863 relOutIndex += bufferSize_;
3866 // the "IN" index CAN BEGIN at the "OUT" index
3867 // the "IN" index CANNOT END at the "OUT" index
3868 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3869 return false; // not enough space between "in" index and "out" index
3872 // copy buffer from external to internal
3873 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3874 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3875 int fromInSize = bufferSize - fromZeroSize;
3880 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3881 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3883 case RTAUDIO_SINT16:
3884 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3885 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3887 case RTAUDIO_SINT24:
3888 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3889 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3891 case RTAUDIO_SINT32:
3892 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3893 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3895 case RTAUDIO_FLOAT32:
3896 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3897 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3899 case RTAUDIO_FLOAT64:
3900 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3901 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3905 // update "in" index
3906 inIndex_ += bufferSize;
3907 inIndex_ %= bufferSize_;
3912 // attempt to pull a buffer from the ring buffer from the current "out" index
3913 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3915 if ( !buffer || // incoming buffer is NULL
3916 bufferSize == 0 || // incoming buffer has no data
3917 bufferSize > bufferSize_ ) // incoming buffer too large
3922 unsigned int relInIndex = inIndex_;
3923 unsigned int outIndexEnd = outIndex_ + bufferSize;
3924 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3925 relInIndex += bufferSize_;
3928 // the "OUT" index CANNOT BEGIN at the "IN" index
3929 // the "OUT" index CAN END at the "IN" index
3930 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3931 return false; // not enough space between "out" index and "in" index
3934 // copy buffer from internal to external
3935 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3936 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3937 int fromOutSize = bufferSize - fromZeroSize;
3942 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3943 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3945 case RTAUDIO_SINT16:
3946 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3947 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3949 case RTAUDIO_SINT24:
3950 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3951 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3953 case RTAUDIO_SINT32:
3954 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3955 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3957 case RTAUDIO_FLOAT32:
3958 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3959 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3961 case RTAUDIO_FLOAT64:
3962 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3963 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3967 // update "out" index
3968 outIndex_ += bufferSize;
3969 outIndex_ %= bufferSize_;
3976 unsigned int bufferSize_;
3977 unsigned int inIndex_;
3978 unsigned int outIndex_;
3981 //-----------------------------------------------------------------------------
3983 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3984 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3985 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3986 class WasapiResampler
3989 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3990 unsigned int inSampleRate, unsigned int outSampleRate )
3991 : _bytesPerSample( bitsPerSample / 8 )
3992 , _channelCount( channelCount )
3993 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3994 , _transformUnk( NULL )
3995 , _transform( NULL )
3996 , _mediaType( NULL )
3997 , _inputMediaType( NULL )
3998 , _outputMediaType( NULL )
4000 #ifdef __IWMResamplerProps_FWD_DEFINED__
4001 , _resamplerProps( NULL )
4004 // 1. Initialization
4006 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4008 // 2. Create Resampler Transform Object
4010 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4011 IID_IUnknown, ( void** ) &_transformUnk );
4013 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4015 #ifdef __IWMResamplerProps_FWD_DEFINED__
4016 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4017 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4020 // 3. Specify input / output format
4022 MFCreateMediaType( &_mediaType );
4023 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4024 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4025 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4026 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4027 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4028 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4029 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4030 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4032 MFCreateMediaType( &_inputMediaType );
4033 _mediaType->CopyAllItems( _inputMediaType );
4035 _transform->SetInputType( 0, _inputMediaType, 0 );
4037 MFCreateMediaType( &_outputMediaType );
4038 _mediaType->CopyAllItems( _outputMediaType );
4040 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4041 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4043 _transform->SetOutputType( 0, _outputMediaType, 0 );
4045 // 4. Send stream start messages to Resampler
4047 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4048 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4049 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4054 // 8. Send stream stop messages to Resampler
4056 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4057 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4063 SAFE_RELEASE( _transformUnk );
4064 SAFE_RELEASE( _transform );
4065 SAFE_RELEASE( _mediaType );
4066 SAFE_RELEASE( _inputMediaType );
4067 SAFE_RELEASE( _outputMediaType );
4069 #ifdef __IWMResamplerProps_FWD_DEFINED__
4070 SAFE_RELEASE( _resamplerProps );
4074 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4076 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4077 if ( _sampleRatio == 1 )
4079 // no sample rate conversion required
4080 memcpy( outBuffer, inBuffer, inputBufferSize );
4081 outSampleCount = inSampleCount;
4085 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4087 IMFMediaBuffer* rInBuffer;
4088 IMFSample* rInSample;
4089 BYTE* rInByteBuffer = NULL;
4091 // 5. Create Sample object from input data
4093 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4095 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4096 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4097 rInBuffer->Unlock();
4098 rInByteBuffer = NULL;
4100 rInBuffer->SetCurrentLength( inputBufferSize );
4102 MFCreateSample( &rInSample );
4103 rInSample->AddBuffer( rInBuffer );
4105 // 6. Pass input data to Resampler
4107 _transform->ProcessInput( 0, rInSample, 0 );
4109 SAFE_RELEASE( rInBuffer );
4110 SAFE_RELEASE( rInSample );
4112 // 7. Perform sample rate conversion
4114 IMFMediaBuffer* rOutBuffer = NULL;
4115 BYTE* rOutByteBuffer = NULL;
4117 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4119 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4121 // 7.1 Create Sample object for output data
4123 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4124 MFCreateSample( &( rOutDataBuffer.pSample ) );
4125 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4126 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4127 rOutDataBuffer.dwStreamID = 0;
4128 rOutDataBuffer.dwStatus = 0;
4129 rOutDataBuffer.pEvents = NULL;
4131 // 7.2 Get output data from Resampler
4133 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4136 SAFE_RELEASE( rOutBuffer );
4137 SAFE_RELEASE( rOutDataBuffer.pSample );
4141 // 7.3 Write output data to outBuffer
4143 SAFE_RELEASE( rOutBuffer );
4144 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4145 rOutBuffer->GetCurrentLength( &rBytes );
4147 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4148 memcpy( outBuffer, rOutByteBuffer, rBytes );
4149 rOutBuffer->Unlock();
4150 rOutByteBuffer = NULL;
4152 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4153 SAFE_RELEASE( rOutBuffer );
4154 SAFE_RELEASE( rOutDataBuffer.pSample );
4158 unsigned int _bytesPerSample;
4159 unsigned int _channelCount;
4162 IUnknown* _transformUnk;
4163 IMFTransform* _transform;
4164 IMFMediaType* _mediaType;
4165 IMFMediaType* _inputMediaType;
4166 IMFMediaType* _outputMediaType;
4168 #ifdef __IWMResamplerProps_FWD_DEFINED__
4169 IWMResamplerProps* _resamplerProps;
4173 //-----------------------------------------------------------------------------
4175 // A structure to hold various information related to the WASAPI implementation.
4178 IAudioClient* captureAudioClient;
4179 IAudioClient* renderAudioClient;
4180 IAudioCaptureClient* captureClient;
4181 IAudioRenderClient* renderClient;
4182 HANDLE captureEvent;
4186 : captureAudioClient( NULL ),
4187 renderAudioClient( NULL ),
4188 captureClient( NULL ),
4189 renderClient( NULL ),
4190 captureEvent( NULL ),
4191 renderEvent( NULL ) {}
4194 //=============================================================================
4196 RtApiWasapi::RtApiWasapi()
4197 : coInitialized_( false ), deviceEnumerator_( NULL )
4199 // WASAPI can run either apartment or multi-threaded
4200 HRESULT hr = CoInitialize( NULL );
4201 if ( !FAILED( hr ) )
4202 coInitialized_ = true;
4204 // Instantiate device enumerator
4205 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4206 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4207 ( void** ) &deviceEnumerator_ );
4209 // If this runs on an old Windows, it will fail. Ignore and proceed.
4211 deviceEnumerator_ = NULL;
4214 //-----------------------------------------------------------------------------
4216 RtApiWasapi::~RtApiWasapi()
4218 if ( stream_.state != STREAM_CLOSED )
4221 SAFE_RELEASE( deviceEnumerator_ );
4223 // If this object previously called CoInitialize()
4224 if ( coInitialized_ )
4228 //=============================================================================
4230 unsigned int RtApiWasapi::getDeviceCount( void )
4232 unsigned int captureDeviceCount = 0;
4233 unsigned int renderDeviceCount = 0;
4235 IMMDeviceCollection* captureDevices = NULL;
4236 IMMDeviceCollection* renderDevices = NULL;
4238 if ( !deviceEnumerator_ )
4241 // Count capture devices
4243 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4244 if ( FAILED( hr ) ) {
4245 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4249 hr = captureDevices->GetCount( &captureDeviceCount );
4250 if ( FAILED( hr ) ) {
4251 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4255 // Count render devices
4256 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4257 if ( FAILED( hr ) ) {
4258 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4262 hr = renderDevices->GetCount( &renderDeviceCount );
4263 if ( FAILED( hr ) ) {
4264 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4269 // release all references
4270 SAFE_RELEASE( captureDevices );
4271 SAFE_RELEASE( renderDevices );
4273 if ( errorText_.empty() )
4274 return captureDeviceCount + renderDeviceCount;
4276 error( RtAudioError::DRIVER_ERROR );
4280 //-----------------------------------------------------------------------------
4282 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4284 RtAudio::DeviceInfo info;
4285 unsigned int captureDeviceCount = 0;
4286 unsigned int renderDeviceCount = 0;
4287 std::string defaultDeviceName;
4288 bool isCaptureDevice = false;
4290 PROPVARIANT deviceNameProp;
4291 PROPVARIANT defaultDeviceNameProp;
4293 IMMDeviceCollection* captureDevices = NULL;
4294 IMMDeviceCollection* renderDevices = NULL;
4295 IMMDevice* devicePtr = NULL;
4296 IMMDevice* defaultDevicePtr = NULL;
4297 IAudioClient* audioClient = NULL;
4298 IPropertyStore* devicePropStore = NULL;
4299 IPropertyStore* defaultDevicePropStore = NULL;
4301 WAVEFORMATEX* deviceFormat = NULL;
4302 WAVEFORMATEX* closestMatchFormat = NULL;
4305 info.probed = false;
4307 // Count capture devices
4309 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4310 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4311 if ( FAILED( hr ) ) {
4312 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4316 hr = captureDevices->GetCount( &captureDeviceCount );
4317 if ( FAILED( hr ) ) {
4318 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4322 // Count render devices
4323 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4324 if ( FAILED( hr ) ) {
4325 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4329 hr = renderDevices->GetCount( &renderDeviceCount );
4330 if ( FAILED( hr ) ) {
4331 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4335 // validate device index
4336 if ( device >= captureDeviceCount + renderDeviceCount ) {
4337 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4338 errorType = RtAudioError::INVALID_USE;
4342 // determine whether index falls within capture or render devices
4343 if ( device >= renderDeviceCount ) {
4344 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4345 if ( FAILED( hr ) ) {
4346 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4349 isCaptureDevice = true;
4352 hr = renderDevices->Item( device, &devicePtr );
4353 if ( FAILED( hr ) ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4357 isCaptureDevice = false;
4360 // get default device name
4361 if ( isCaptureDevice ) {
4362 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4363 if ( FAILED( hr ) ) {
4364 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4369 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4370 if ( FAILED( hr ) ) {
4371 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4376 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4377 if ( FAILED( hr ) ) {
4378 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4381 PropVariantInit( &defaultDeviceNameProp );
4383 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4384 if ( FAILED( hr ) ) {
4385 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4389 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4392 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4393 if ( FAILED( hr ) ) {
4394 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4398 PropVariantInit( &deviceNameProp );
4400 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4401 if ( FAILED( hr ) ) {
4402 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4406 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4409 if ( isCaptureDevice ) {
4410 info.isDefaultInput = info.name == defaultDeviceName;
4411 info.isDefaultOutput = false;
4414 info.isDefaultInput = false;
4415 info.isDefaultOutput = info.name == defaultDeviceName;
4419 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4420 if ( FAILED( hr ) ) {
4421 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4425 hr = audioClient->GetMixFormat( &deviceFormat );
4426 if ( FAILED( hr ) ) {
4427 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4431 if ( isCaptureDevice ) {
4432 info.inputChannels = deviceFormat->nChannels;
4433 info.outputChannels = 0;
4434 info.duplexChannels = 0;
4437 info.inputChannels = 0;
4438 info.outputChannels = deviceFormat->nChannels;
4439 info.duplexChannels = 0;
4443 info.sampleRates.clear();
4445 // allow support for all sample rates as we have a built-in sample rate converter
4446 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4447 info.sampleRates.push_back( SAMPLE_RATES[i] );
4449 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4452 info.nativeFormats = 0;
4454 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4455 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4456 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4458 if ( deviceFormat->wBitsPerSample == 32 ) {
4459 info.nativeFormats |= RTAUDIO_FLOAT32;
4461 else if ( deviceFormat->wBitsPerSample == 64 ) {
4462 info.nativeFormats |= RTAUDIO_FLOAT64;
4465 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4466 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4467 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4469 if ( deviceFormat->wBitsPerSample == 8 ) {
4470 info.nativeFormats |= RTAUDIO_SINT8;
4472 else if ( deviceFormat->wBitsPerSample == 16 ) {
4473 info.nativeFormats |= RTAUDIO_SINT16;
4475 else if ( deviceFormat->wBitsPerSample == 24 ) {
4476 info.nativeFormats |= RTAUDIO_SINT24;
4478 else if ( deviceFormat->wBitsPerSample == 32 ) {
4479 info.nativeFormats |= RTAUDIO_SINT32;
4487 // release all references
4488 PropVariantClear( &deviceNameProp );
4489 PropVariantClear( &defaultDeviceNameProp );
4491 SAFE_RELEASE( captureDevices );
4492 SAFE_RELEASE( renderDevices );
4493 SAFE_RELEASE( devicePtr );
4494 SAFE_RELEASE( defaultDevicePtr );
4495 SAFE_RELEASE( audioClient );
4496 SAFE_RELEASE( devicePropStore );
4497 SAFE_RELEASE( defaultDevicePropStore );
4499 CoTaskMemFree( deviceFormat );
4500 CoTaskMemFree( closestMatchFormat );
4502 if ( !errorText_.empty() )
4507 //-----------------------------------------------------------------------------
4509 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4511 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4512 if ( getDeviceInfo( i ).isDefaultOutput ) {
4520 //-----------------------------------------------------------------------------
4522 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4524 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4525 if ( getDeviceInfo( i ).isDefaultInput ) {
4533 //-----------------------------------------------------------------------------
4535 void RtApiWasapi::closeStream( void )
4537 if ( stream_.state == STREAM_CLOSED ) {
4538 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4539 error( RtAudioError::WARNING );
4543 if ( stream_.state != STREAM_STOPPED )
4546 // clean up stream memory
4547 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4548 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4550 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4551 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4553 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4554 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4556 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4557 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4559 delete ( WasapiHandle* ) stream_.apiHandle;
4560 stream_.apiHandle = NULL;
4562 for ( int i = 0; i < 2; i++ ) {
4563 if ( stream_.userBuffer[i] ) {
4564 free( stream_.userBuffer[i] );
4565 stream_.userBuffer[i] = 0;
4569 if ( stream_.deviceBuffer ) {
4570 free( stream_.deviceBuffer );
4571 stream_.deviceBuffer = 0;
4574 // update stream state
4575 stream_.state = STREAM_CLOSED;
4578 //-----------------------------------------------------------------------------
4580 void RtApiWasapi::startStream( void )
4584 if ( stream_.state == STREAM_RUNNING ) {
4585 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4586 error( RtAudioError::WARNING );
4590 #if defined( HAVE_GETTIMEOFDAY )
4591 gettimeofday( &stream_.lastTickTimestamp, NULL );
4594 // update stream state
4595 stream_.state = STREAM_RUNNING;
4597 // create WASAPI stream thread
4598 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4600 if ( !stream_.callbackInfo.thread ) {
4601 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4602 error( RtAudioError::THREAD_ERROR );
4605 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4606 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4610 //-----------------------------------------------------------------------------
4612 void RtApiWasapi::stopStream( void )
4616 if ( stream_.state == STREAM_STOPPED ) {
4617 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4618 error( RtAudioError::WARNING );
4622 // inform stream thread by setting stream state to STREAM_STOPPING
4623 stream_.state = STREAM_STOPPING;
4625 // wait until stream thread is stopped
4626 while( stream_.state != STREAM_STOPPED ) {
4630 // Wait for the last buffer to play before stopping.
4631 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4633 // close thread handle
4634 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4635 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4636 error( RtAudioError::THREAD_ERROR );
4640 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4643 //-----------------------------------------------------------------------------
4645 void RtApiWasapi::abortStream( void )
4649 if ( stream_.state == STREAM_STOPPED ) {
4650 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4651 error( RtAudioError::WARNING );
4655 // inform stream thread by setting stream state to STREAM_STOPPING
4656 stream_.state = STREAM_STOPPING;
4658 // wait until stream thread is stopped
4659 while ( stream_.state != STREAM_STOPPED ) {
4663 // close thread handle
4664 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4665 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4666 error( RtAudioError::THREAD_ERROR );
4670 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4673 //-----------------------------------------------------------------------------
4675 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4676 unsigned int firstChannel, unsigned int sampleRate,
4677 RtAudioFormat format, unsigned int* bufferSize,
4678 RtAudio::StreamOptions* options )
4680 bool methodResult = FAILURE;
4681 unsigned int captureDeviceCount = 0;
4682 unsigned int renderDeviceCount = 0;
4684 IMMDeviceCollection* captureDevices = NULL;
4685 IMMDeviceCollection* renderDevices = NULL;
4686 IMMDevice* devicePtr = NULL;
4687 WAVEFORMATEX* deviceFormat = NULL;
4688 unsigned int bufferBytes;
4689 stream_.state = STREAM_STOPPED;
4691 // create API Handle if not already created
4692 if ( !stream_.apiHandle )
4693 stream_.apiHandle = ( void* ) new WasapiHandle();
4695 // Count capture devices
4697 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4698 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4699 if ( FAILED( hr ) ) {
4700 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4704 hr = captureDevices->GetCount( &captureDeviceCount );
4705 if ( FAILED( hr ) ) {
4706 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4710 // Count render devices
4711 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4712 if ( FAILED( hr ) ) {
4713 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4717 hr = renderDevices->GetCount( &renderDeviceCount );
4718 if ( FAILED( hr ) ) {
4719 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4723 // validate device index
4724 if ( device >= captureDeviceCount + renderDeviceCount ) {
4725 errorType = RtAudioError::INVALID_USE;
4726 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4730 // if device index falls within capture devices
4731 if ( device >= renderDeviceCount ) {
4732 if ( mode != INPUT ) {
4733 errorType = RtAudioError::INVALID_USE;
4734 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4738 // retrieve captureAudioClient from devicePtr
4739 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4741 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4742 if ( FAILED( hr ) ) {
4743 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4747 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4748 NULL, ( void** ) &captureAudioClient );
4749 if ( FAILED( hr ) ) {
4750 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4754 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4755 if ( FAILED( hr ) ) {
4756 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4760 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4761 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4764 // if device index falls within render devices and is configured for loopback
4765 if ( device < renderDeviceCount && mode == INPUT )
4767 // if renderAudioClient is not initialised, initialise it now
4768 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4769 if ( !renderAudioClient )
4771 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4774 // retrieve captureAudioClient from devicePtr
4775 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4777 hr = renderDevices->Item( device, &devicePtr );
4778 if ( FAILED( hr ) ) {
4779 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4783 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4784 NULL, ( void** ) &captureAudioClient );
4785 if ( FAILED( hr ) ) {
4786 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4790 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4791 if ( FAILED( hr ) ) {
4792 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4796 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4797 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4800 // if device index falls within render devices and is configured for output
4801 if ( device < renderDeviceCount && mode == OUTPUT )
4803 // if renderAudioClient is already initialised, don't initialise it again
4804 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4805 if ( renderAudioClient )
4807 methodResult = SUCCESS;
4811 hr = renderDevices->Item( device, &devicePtr );
4812 if ( FAILED( hr ) ) {
4813 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4817 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4818 NULL, ( void** ) &renderAudioClient );
4819 if ( FAILED( hr ) ) {
4820 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4824 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4825 if ( FAILED( hr ) ) {
4826 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4830 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4831 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4835 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4836 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4837 stream_.mode = DUPLEX;
4840 stream_.mode = mode;
4843 stream_.device[mode] = device;
4844 stream_.doByteSwap[mode] = false;
4845 stream_.sampleRate = sampleRate;
4846 stream_.bufferSize = *bufferSize;
4847 stream_.nBuffers = 1;
4848 stream_.nUserChannels[mode] = channels;
4849 stream_.channelOffset[mode] = firstChannel;
4850 stream_.userFormat = format;
4851 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4853 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4854 stream_.userInterleaved = false;
4856 stream_.userInterleaved = true;
4857 stream_.deviceInterleaved[mode] = true;
4859 // Set flags for buffer conversion.
4860 stream_.doConvertBuffer[mode] = false;
4861 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4862 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4863 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4864 stream_.doConvertBuffer[mode] = true;
4865 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4866 stream_.nUserChannels[mode] > 1 )
4867 stream_.doConvertBuffer[mode] = true;
4869 if ( stream_.doConvertBuffer[mode] )
4870 setConvertInfo( mode, firstChannel );
4872 // Allocate necessary internal buffers
4873 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4875 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4876 if ( !stream_.userBuffer[mode] ) {
4877 errorType = RtAudioError::MEMORY_ERROR;
4878 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4882 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4883 stream_.callbackInfo.priority = 15;
4885 stream_.callbackInfo.priority = 0;
4887 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4888 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4890 methodResult = SUCCESS;
4894 SAFE_RELEASE( captureDevices );
4895 SAFE_RELEASE( renderDevices );
4896 SAFE_RELEASE( devicePtr );
4897 CoTaskMemFree( deviceFormat );
4899 // if method failed, close the stream
4900 if ( methodResult == FAILURE )
4903 if ( !errorText_.empty() )
4905 return methodResult;
4908 //=============================================================================
4910 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4913 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4918 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4921 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4926 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4929 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4934 //-----------------------------------------------------------------------------
// Dedicated WASAPI i/o thread.  Finishes lazy device setup (audio
// clients, events, resamplers, intermediate ring buffers), then loops:
// pull captured audio, run the user callback, push rendered audio —
// until stream_.state becomes STREAM_STOPPING.  Errors set errorText
// and fall out to the cleanup section at the bottom.
4936 void RtApiWasapi::wasapiThread()
4938 // as this is a new thread, we must CoInitialize it
4939 CoInitialize( NULL );
// Shared handles previously stored in the stream's WasapiHandle by probeDeviceOpen().
4943 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4944 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4945 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4946 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4947 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4948 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4950 WAVEFORMATEX* captureFormat = NULL;
4951 WAVEFORMATEX* renderFormat = NULL;
4952 float captureSrRatio = 0.0f;
4953 float renderSrRatio = 0.0f;
4954 WasapiBuffer captureBuffer;
4955 WasapiBuffer renderBuffer;
4956 WasapiResampler* captureResampler = NULL;
4957 WasapiResampler* renderResampler = NULL;
4959 // declare local stream variables
4960 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4961 BYTE* streamBuffer = NULL;
4962 DWORD captureFlags = 0;
4963 unsigned int bufferFrameCount = 0;
4964 unsigned int numFramesPadding = 0;
4965 unsigned int convBufferSize = 0;
// Loopback capture is selected by opening the same device for input and output.
4966 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4967 bool callbackPushed = true;
4968 bool callbackPulled = false;
4969 bool callbackStopped = false;
4970 int callbackResult = 0;
4972 // convBuffer is used to store converted buffers between WASAPI and the user
4973 char* convBuffer = NULL;
4974 unsigned int convBuffSize = 0;
4975 unsigned int deviceBuffSize = 0;
4977 std::string errorText;
4978 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4980 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): neither AvrtDll nor the GetProcAddress() result is
// NULL-checked before the call below — TODO confirm AVRT.dll is always
// loadable on supported Windows versions.
4981 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4983 DWORD taskIndex = 0;
4984 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4985 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4986 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4987 FreeLibrary( AvrtDll );
4990 // start capture stream if applicable
4991 if ( captureAudioClient ) {
4992 hr = captureAudioClient->GetMixFormat( &captureFormat );
4993 if ( FAILED( hr ) ) {
4994 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4998 // init captureResampler
4999 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5000 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5001 captureFormat->nSamplesPerSec, stream_.sampleRate );
// Ratio > 1 means the device runs faster than the user-requested rate.
5003 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5005 if ( !captureClient ) {
5006 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5007 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5012 if ( FAILED( hr ) ) {
5013 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5017 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5018 ( void** ) &captureClient );
5019 if ( FAILED( hr ) ) {
5020 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5024 // don't configure captureEvent if in loopback mode
5025 if ( !loopbackEnabled )
5027 // configure captureEvent to trigger on every available capture buffer
5028 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5029 if ( !captureEvent ) {
5030 errorType = RtAudioError::SYSTEM_ERROR;
5031 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5035 hr = captureAudioClient->SetEventHandle( captureEvent );
5036 if ( FAILED( hr ) ) {
5037 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5041 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5044 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5046 // reset the capture stream
5047 hr = captureAudioClient->Reset();
5048 if ( FAILED( hr ) ) {
5049 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5053 // start the capture stream
5054 hr = captureAudioClient->Start();
5055 if ( FAILED( hr ) ) {
5056 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5061 unsigned int inBufferSize = 0;
5062 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5063 if ( FAILED( hr ) ) {
5064 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5068 // scale outBufferSize according to stream->user sample rate ratio
5069 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5070 inBufferSize *= stream_.nDeviceChannels[INPUT];
5072 // set captureBuffer size
// Ring buffer must hold one device period plus one (rate-scaled) user period.
5073 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5076 // start render stream if applicable
5077 if ( renderAudioClient ) {
5078 hr = renderAudioClient->GetMixFormat( &renderFormat );
5079 if ( FAILED( hr ) ) {
5080 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5084 // init renderResampler
5085 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5086 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5087 stream_.sampleRate, renderFormat->nSamplesPerSec );
5089 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5091 if ( !renderClient ) {
5092 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5093 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5098 if ( FAILED( hr ) ) {
5099 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5103 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5104 ( void** ) &renderClient );
5105 if ( FAILED( hr ) ) {
5106 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5110 // configure renderEvent to trigger on every available render buffer
5111 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5112 if ( !renderEvent ) {
5113 errorType = RtAudioError::SYSTEM_ERROR;
5114 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5118 hr = renderAudioClient->SetEventHandle( renderEvent );
5119 if ( FAILED( hr ) ) {
5120 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5124 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5125 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5127 // reset the render stream
5128 hr = renderAudioClient->Reset();
5129 if ( FAILED( hr ) ) {
5130 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5134 // start the render stream
5135 hr = renderAudioClient->Start();
5136 if ( FAILED( hr ) ) {
5137 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5142 unsigned int outBufferSize = 0;
5143 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5144 if ( FAILED( hr ) ) {
5145 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5149 // scale inBufferSize according to user->stream sample rate ratio
5150 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5151 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5153 // set renderBuffer size
5154 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5157 // malloc buffer memory
// Size the conversion and device buffers for whichever direction(s) are
// active; DUPLEX takes the max of both so one buffer serves either side.
5158 if ( stream_.mode == INPUT )
5160 using namespace std; // for ceilf
5161 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5162 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5164 else if ( stream_.mode == OUTPUT )
5166 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5167 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5169 else if ( stream_.mode == DUPLEX )
5171 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5172 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5173 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5174 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5177 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5178 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5179 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5180 if ( !convBuffer || !stream_.deviceBuffer ) {
5181 errorType = RtAudioError::MEMORY_ERROR;
5182 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5186 // stream process loop
5187 while ( stream_.state != STREAM_STOPPING ) {
5188 if ( !callbackPulled ) {
5191 // 1. Pull callback buffer from inputBuffer
5192 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5193 // Convert callback buffer to user format
5195 if ( captureAudioClient )
// Pull one rate-scaled user period; when resampling, top up one sample
// at a time until a full user buffer has been produced.
5197 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5198 if ( captureSrRatio != 1 )
5200 // account for remainders
5205 while ( convBufferSize < stream_.bufferSize )
5207 // Pull callback buffer from inputBuffer
5208 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5209 samplesToPull * stream_.nDeviceChannels[INPUT],
5210 stream_.deviceFormat[INPUT] );
5212 if ( !callbackPulled )
5217 // Convert callback buffer to user sample rate
5218 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5219 unsigned int convSamples = 0;
5221 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5226 convBufferSize += convSamples;
5227 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5230 if ( callbackPulled )
5232 if ( stream_.doConvertBuffer[INPUT] ) {
5233 // Convert callback buffer to user format
5234 convertBuffer( stream_.userBuffer[INPUT],
5235 stream_.deviceBuffer,
5236 stream_.convertInfo[INPUT] );
5239 // no further conversion, simple copy deviceBuffer to userBuffer
5240 memcpy( stream_.userBuffer[INPUT],
5241 stream_.deviceBuffer,
5242 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5247 // if there is no capture stream, set callbackPulled flag
5248 callbackPulled = true;
5253 // 1. Execute user callback method
5254 // 2. Handle return value from callback
5256 // if callback has not requested the stream to stop
5257 if ( callbackPulled && !callbackStopped ) {
5258 // Execute user callback method
5259 callbackResult = callback( stream_.userBuffer[OUTPUT],
5260 stream_.userBuffer[INPUT],
5263 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5264 stream_.callbackInfo.userData );
5267 RtApi::tickStreamTime();
5269 // Handle return value from callback
// 1 = drain-and-stop, 2 = abort immediately; either way the actual stop
// runs on a helper thread so this i/o thread can keep servicing buffers.
5270 if ( callbackResult == 1 ) {
5271 // instantiate a thread to stop this thread
5272 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5273 if ( !threadHandle ) {
5274 errorType = RtAudioError::THREAD_ERROR;
5275 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5278 else if ( !CloseHandle( threadHandle ) ) {
5279 errorType = RtAudioError::THREAD_ERROR;
5280 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5284 callbackStopped = true;
5286 else if ( callbackResult == 2 ) {
5287 // instantiate a thread to stop this thread
5288 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5289 if ( !threadHandle ) {
5290 errorType = RtAudioError::THREAD_ERROR;
5291 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5294 else if ( !CloseHandle( threadHandle ) ) {
5295 errorType = RtAudioError::THREAD_ERROR;
5296 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5300 callbackStopped = true;
5307 // 1. Convert callback buffer to stream format
5308 // 2. Convert callback buffer to stream sample rate and channel count
5309 // 3. Push callback buffer into outputBuffer
5311 if ( renderAudioClient && callbackPulled )
5313 // if the last call to renderBuffer.PushBuffer() was successful
5314 if ( callbackPushed || convBufferSize == 0 )
5316 if ( stream_.doConvertBuffer[OUTPUT] )
5318 // Convert callback buffer to stream format
5319 convertBuffer( stream_.deviceBuffer,
5320 stream_.userBuffer[OUTPUT],
5321 stream_.convertInfo[OUTPUT] );
5325 // no further conversion, simple copy userBuffer to deviceBuffer
5326 memcpy( stream_.deviceBuffer,
5327 stream_.userBuffer[OUTPUT],
5328 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5331 // Convert callback buffer to stream sample rate
5332 renderResampler->Convert( convBuffer,
5333 stream_.deviceBuffer,
5338 // Push callback buffer into outputBuffer
5339 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5340 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5341 stream_.deviceFormat[OUTPUT] );
5344 // if there is no render stream, set callbackPushed flag
5345 callbackPushed = true;
5350 // 1. Get capture buffer from stream
5351 // 2. Push capture buffer into inputBuffer
5352 // 3. If 2. was successful: Release capture buffer
5354 if ( captureAudioClient ) {
5355 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// In loopback mode there is no capture event, so wait on the render event instead.
5356 if ( !callbackPulled ) {
5357 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5360 // Get capture buffer from stream
5361 hr = captureClient->GetBuffer( &streamBuffer,
5363 &captureFlags, NULL, NULL );
5364 if ( FAILED( hr ) ) {
5365 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5369 if ( bufferFrameCount != 0 ) {
5370 // Push capture buffer into inputBuffer
5371 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5372 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5373 stream_.deviceFormat[INPUT] ) )
5375 // Release capture buffer
5376 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5377 if ( FAILED( hr ) ) {
5378 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5384 // Inform WASAPI that capture was unsuccessful
5385 hr = captureClient->ReleaseBuffer( 0 );
5386 if ( FAILED( hr ) ) {
5387 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5394 // Inform WASAPI that capture was unsuccessful
5395 hr = captureClient->ReleaseBuffer( 0 );
5396 if ( FAILED( hr ) ) {
5397 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5405 // 1. Get render buffer from stream
5406 // 2. Pull next buffer from outputBuffer
5407 // 3. If 2. was successful: Fill render buffer with next buffer
5408 // Release render buffer
5410 if ( renderAudioClient ) {
5411 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5412 if ( callbackPulled && !callbackPushed ) {
5413 WaitForSingleObject( renderEvent, INFINITE );
5416 // Get render buffer from stream
5417 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5418 if ( FAILED( hr ) ) {
5419 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5423 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5424 if ( FAILED( hr ) ) {
5425 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable frames = total buffer size minus frames still queued for playback.
5429 bufferFrameCount -= numFramesPadding;
5431 if ( bufferFrameCount != 0 ) {
5432 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5433 if ( FAILED( hr ) ) {
5434 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5438 // Pull next buffer from outputBuffer
5439 // Fill render buffer with next buffer
5440 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5441 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5442 stream_.deviceFormat[OUTPUT] ) )
5444 // Release render buffer
5445 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5446 if ( FAILED( hr ) ) {
5447 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5453 // Inform WASAPI that render was unsuccessful
5454 hr = renderClient->ReleaseBuffer( 0, 0 );
5455 if ( FAILED( hr ) ) {
5456 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5463 // Inform WASAPI that render was unsuccessful
5464 hr = renderClient->ReleaseBuffer( 0, 0 );
5465 if ( FAILED( hr ) ) {
5466 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5472 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5473 if ( callbackPushed ) {
5474 // unsetting the callbackPulled flag lets the stream know that
5475 // the audio device is ready for another callback output buffer.
5476 callbackPulled = false;
// Cleanup: release mix formats and per-thread conversion resources.
5483 CoTaskMemFree( captureFormat );
5484 CoTaskMemFree( renderFormat );
5486 free ( convBuffer );
5487 delete renderResampler;
5488 delete captureResampler;
5492 // update stream state
5493 stream_.state = STREAM_STOPPED;
// Report any error recorded above through the RtAudio error machinery.
5495 if ( !errorText.empty() )
5497 errorText_ = errorText;
5502 //******************** End of __WINDOWS_WASAPI__ *********************//
5506 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5508 // Modified by Robin Davies, October 2005
5509 // - Improvements to DirectX pointer chasing.
5510 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5511 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5512 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5513 // Changed device query structure for RtAudio 4.0.7, January 2010
5515 #include <windows.h>
5516 #include <process.h>
5517 #include <mmsystem.h>
5521 #include <algorithm>
5523 #if defined(__MINGW32__)
5524 // missing from latest mingw winapi
5525 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5526 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5527 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5528 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5531 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5533 #ifdef _MSC_VER // if Microsoft Visual C++
5534 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5537 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5539 if ( pointer > bufferSize ) pointer -= bufferSize;
5540 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5541 if ( pointer < earlierPointer ) pointer += bufferSize;
5542 return pointer >= earlierPointer && pointer < laterPointer;
5545 // A structure to hold various information related to the DirectSound
5546 // API implementation.
5548 unsigned int drainCounter; // Tracks callback counts when draining
5549 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Two-element arrays hold per-direction state — presumably [0]=playback,
// [1]=capture; TODO confirm the index convention against the callers.
5553 UINT bufferPointer[2];
5554 DWORD dsBufferSize[2]; // size in bytes of each DirectSound buffer
5555 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Constructor: zero the ids, buffers, xrun flags and buffer pointers for both directions.
5559 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5562 // Declarations for utility functions, callbacks, and structures
5563 // specific to the DirectSound implementation.
// Enumeration callback invoked once per device by DirectSound(Capture)Enumerate.
5564 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5565 LPCTSTR description,
// Maps a DirectSound error code to a human-readable string (used in errorStream_ messages).
5569 static const char* getErrorString( int code );
// NOTE(review): signature matches a _beginthreadex-style thread entry —
// presumably the DirectSound callback thread; confirm at the usage site.
5571 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice constructor: a device starts unseen, with neither id valid.
5580 : found(false) { validId[0] = false; validId[1] = false; }
// Probe context handed to the enumeration callback above.
5583 struct DsProbeData {
5585 std::vector<struct DsDevice>* dsDevices;
5588 RtApiDs :: RtApiDs()
5590 // Dsound will run both-threaded. If CoInitialize fails, then just
5591 // accept whatever the mainline chose for a threading model.
5592 coInitialized_ = false;
5593 HRESULT hr = CoInitialize( NULL );
5594 if ( !FAILED( hr ) ) coInitialized_ = true;
5597 RtApiDs :: ~RtApiDs()
5599 if ( stream_.state != STREAM_CLOSED ) closeStream();
5600 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5603 // The DirectSound default output is always the first device.
5604 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5609 // The DirectSound default input is always the first input device,
5610 // which is the first capture device enumerated.
5611 unsigned int RtApiDs :: getDefaultInputDevice( void )
5616 unsigned int RtApiDs :: getDeviceCount( void )
5618 // Set query flag for previously found devices to false, so that we
5619 // can check for any devices that have disappeared.
5620 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5621 dsDevices[i].found = false;
5623 // Query DirectSound devices.
5624 struct DsProbeData probeInfo;
5625 probeInfo.isInput = false;
5626 probeInfo.dsDevices = &dsDevices;
5627 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5628 if ( FAILED( result ) ) {
5629 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5630 errorText_ = errorStream_.str();
5631 error( RtAudioError::WARNING );
5634 // Query DirectSoundCapture devices.
5635 probeInfo.isInput = true;
5636 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5637 if ( FAILED( result ) ) {
5638 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5639 errorText_ = errorStream_.str();
5640 error( RtAudioError::WARNING );
5643 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5644 for ( unsigned int i=0; i<dsDevices.size(); ) {
5645 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5649 return static_cast<unsigned int>(dsDevices.size());
// Probe the DirectSound device at index `device` in dsDevices and fill
// an RtAudio::DeviceInfo record: channel counts, supported sample
// rates, native formats and default-device flags.  The playback side is
// probed first; the capture side is probed afterwards (reached directly
// or via `goto probeInput` when the device has no valid playback id).
5652 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5654 RtAudio::DeviceInfo info;
5655 info.probed = false;
// Lazily (re)build the device list if no devices have been enumerated yet.
5657 if ( dsDevices.size() == 0 ) {
5658 // Force a query of all devices
5660 if ( dsDevices.size() == 0 ) {
5661 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5662 error( RtAudioError::INVALID_USE );
5667 if ( device >= dsDevices.size() ) {
5668 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5669 error( RtAudioError::INVALID_USE );
// Playback probe — skipped entirely when the device has no valid output id.
5674 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5676 LPDIRECTSOUND output;
5678 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5679 if ( FAILED( result ) ) {
5680 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5681 errorText_ = errorStream_.str();
5682 error( RtAudioError::WARNING );
5686 outCaps.dwSize = sizeof( outCaps );
5687 result = output->GetCaps( &outCaps );
5688 if ( FAILED( result ) ) {
5690 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5691 errorText_ = errorStream_.str();
5692 error( RtAudioError::WARNING );
5696 // Get output channel information.
5697 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5699 // Get sample rate information.
// Keep every standard rate inside the device's secondary-buffer range;
// prefer the highest such rate that does not exceed 48 kHz.
5700 info.sampleRates.clear();
5701 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5702 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5703 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5704 info.sampleRates.push_back( SAMPLE_RATES[k] );
5706 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5707 info.preferredSampleRate = SAMPLE_RATES[k];
5711 // Get format information.
5712 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5713 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5717 if ( getDefaultOutputDevice() == device )
5718 info.isDefaultOutput = true;
// No capture side: the output-only probe result is complete.
5720 if ( dsDevices[ device ].validId[1] == false ) {
5721 info.name = dsDevices[ device ].name;
// Capture-side probe.
5728 LPDIRECTSOUNDCAPTURE input;
5729 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5730 if ( FAILED( result ) ) {
5731 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5732 errorText_ = errorStream_.str();
5733 error( RtAudioError::WARNING );
5738 inCaps.dwSize = sizeof( inCaps );
5739 result = input->GetCaps( &inCaps );
5740 if ( FAILED( result ) ) {
5742 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5743 errorText_ = errorStream_.str();
5744 error( RtAudioError::WARNING );
5748 // Get input channel information.
5749 info.inputChannels = inCaps.dwChannels;
5751 // Get sample rate and format information.
// DirectSoundCapture advertises fixed rate/format combinations through
// WAVE_FORMAT_* bits; derive native formats first, then the rates the
// chosen format supports.
5752 std::vector<unsigned int> rates;
5753 if ( inCaps.dwChannels >= 2 ) {
5754 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5755 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5756 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5757 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5758 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5759 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5760 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5761 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5763 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5764 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5765 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5766 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5767 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5769 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5770 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5771 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5772 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5773 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5776 else if ( inCaps.dwChannels == 1 ) {
5777 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5778 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5779 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5780 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5781 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5782 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5783 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5784 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5786 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5787 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5788 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5789 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5790 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5792 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5793 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5794 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5795 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5796 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5799 else info.inputChannels = 0; // technically, this would be an error
5803 if ( info.inputChannels == 0 ) return info;
5805 // Copy the supported rates to the info structure but avoid duplication.
5807 for ( unsigned int i=0; i<rates.size(); i++ ) {
5809 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5810 if ( rates[i] == info.sampleRates[j] ) {
5815 if ( found == false ) info.sampleRates.push_back( rates[i] );
5817 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5819 // If device opens for both playback and capture, we determine the channels.
5820 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5821 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is always the DirectSound default input (see getDefaultInputDevice).
5823 if ( device == 0 ) info.isDefaultInput = true;
5825 // Copy name and return.
5826 info.name = dsDevices[ device ].name;
5831 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5832 unsigned int firstChannel, unsigned int sampleRate,
5833 RtAudioFormat format, unsigned int *bufferSize,
5834 RtAudio::StreamOptions *options )
5836 if ( channels + firstChannel > 2 ) {
5837 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5841 size_t nDevices = dsDevices.size();
5842 if ( nDevices == 0 ) {
5843 // This should not happen because a check is made before this function is called.
5844 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5848 if ( device >= nDevices ) {
5849 // This should not happen because a check is made before this function is called.
5850 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5854 if ( mode == OUTPUT ) {
5855 if ( dsDevices[ device ].validId[0] == false ) {
5856 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5857 errorText_ = errorStream_.str();
5861 else { // mode == INPUT
5862 if ( dsDevices[ device ].validId[1] == false ) {
5863 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5864 errorText_ = errorStream_.str();
5869 // According to a note in PortAudio, using GetDesktopWindow()
5870 // instead of GetForegroundWindow() is supposed to avoid problems
5871 // that occur when the application's window is not the foreground
5872 // window. Also, if the application window closes before the
5873 // DirectSound buffer, DirectSound can crash. In the past, I had
5874 // problems when using GetDesktopWindow() but it seems fine now
5875 // (January 2010). I'll leave it commented here.
5876 // HWND hWnd = GetForegroundWindow();
5877 HWND hWnd = GetDesktopWindow();
5879 // Check the numberOfBuffers parameter and limit the lowest value to
5880 // two. This is a judgement call and a value of two is probably too
5881 // low for capture, but it should work for playback.
5883 if ( options ) nBuffers = options->numberOfBuffers;
5884 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5885 if ( nBuffers < 2 ) nBuffers = 3;
5887 // Check the lower range of the user-specified buffer size and set
5888 // (arbitrarily) to a lower bound of 32.
5889 if ( *bufferSize < 32 ) *bufferSize = 32;
5891 // Create the wave format structure. The data format setting will
5892 // be determined later.
5893 WAVEFORMATEX waveFormat;
5894 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5895 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5896 waveFormat.nChannels = channels + firstChannel;
5897 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5899 // Determine the device buffer size. By default, we'll use the value
5900 // defined above (32K), but we will grow it to make allowances for
5901 // very large software buffer sizes.
5902 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5903 DWORD dsPointerLeadTime = 0;
5905 void *ohandle = 0, *bhandle = 0;
5907 if ( mode == OUTPUT ) {
5909 LPDIRECTSOUND output;
5910 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5911 if ( FAILED( result ) ) {
5912 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5913 errorText_ = errorStream_.str();
5918 outCaps.dwSize = sizeof( outCaps );
5919 result = output->GetCaps( &outCaps );
5920 if ( FAILED( result ) ) {
5922 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5923 errorText_ = errorStream_.str();
5927 // Check channel information.
5928 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5929 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5930 errorText_ = errorStream_.str();
5934 // Check format information. Use 16-bit format unless not
5935 // supported or user requests 8-bit.
5936 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5937 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5938 waveFormat.wBitsPerSample = 16;
5939 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5942 waveFormat.wBitsPerSample = 8;
5943 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5945 stream_.userFormat = format;
5947 // Update wave format structure and buffer information.
5948 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5949 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5950 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5952 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5953 while ( dsPointerLeadTime * 2U > dsBufferSize )
5956 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5957 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5958 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5959 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5960 if ( FAILED( result ) ) {
5962 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5963 errorText_ = errorStream_.str();
5967 // Even though we will write to the secondary buffer, we need to
5968 // access the primary buffer to set the correct output format
5969 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5970 // buffer description.
5971 DSBUFFERDESC bufferDescription;
5972 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5973 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5974 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5976 // Obtain the primary buffer
5977 LPDIRECTSOUNDBUFFER buffer;
5978 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5979 if ( FAILED( result ) ) {
5981 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5982 errorText_ = errorStream_.str();
5986 // Set the primary DS buffer sound format.
5987 result = buffer->SetFormat( &waveFormat );
5988 if ( FAILED( result ) ) {
5990 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5991 errorText_ = errorStream_.str();
5995 // Setup the secondary DS buffer description.
5996 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5997 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5998 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5999 DSBCAPS_GLOBALFOCUS |
6000 DSBCAPS_GETCURRENTPOSITION2 |
6001 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6002 bufferDescription.dwBufferBytes = dsBufferSize;
6003 bufferDescription.lpwfxFormat = &waveFormat;
6005 // Try to create the secondary DS buffer. If that doesn't work,
6006 // try to use software mixing. Otherwise, there's a problem.
6007 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6008 if ( FAILED( result ) ) {
6009 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6010 DSBCAPS_GLOBALFOCUS |
6011 DSBCAPS_GETCURRENTPOSITION2 |
6012 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6013 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6014 if ( FAILED( result ) ) {
6016 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6017 errorText_ = errorStream_.str();
6022 // Get the buffer size ... might be different from what we specified.
6024 dsbcaps.dwSize = sizeof( DSBCAPS );
6025 result = buffer->GetCaps( &dsbcaps );
6026 if ( FAILED( result ) ) {
6029 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6030 errorText_ = errorStream_.str();
6034 dsBufferSize = dsbcaps.dwBufferBytes;
6036 // Lock the DS buffer
6039 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6040 if ( FAILED( result ) ) {
6043 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6044 errorText_ = errorStream_.str();
6048 // Zero the DS buffer
6049 ZeroMemory( audioPtr, dataLen );
6051 // Unlock the DS buffer
6052 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6053 if ( FAILED( result ) ) {
6056 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6057 errorText_ = errorStream_.str();
6061 ohandle = (void *) output;
6062 bhandle = (void *) buffer;
6065 if ( mode == INPUT ) {
6067 LPDIRECTSOUNDCAPTURE input;
6068 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6069 if ( FAILED( result ) ) {
6070 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6071 errorText_ = errorStream_.str();
6076 inCaps.dwSize = sizeof( inCaps );
6077 result = input->GetCaps( &inCaps );
6078 if ( FAILED( result ) ) {
6080 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6081 errorText_ = errorStream_.str();
6085 // Check channel information.
6086 if ( inCaps.dwChannels < channels + firstChannel ) {
6087 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6091 // Check format information. Use 16-bit format unless user
6093 DWORD deviceFormats;
6094 if ( channels + firstChannel == 2 ) {
6095 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6096 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6097 waveFormat.wBitsPerSample = 8;
6098 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6100 else { // assume 16-bit is supported
6101 waveFormat.wBitsPerSample = 16;
6102 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6105 else { // channel == 1
6106 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6107 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6108 waveFormat.wBitsPerSample = 8;
6109 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6111 else { // assume 16-bit is supported
6112 waveFormat.wBitsPerSample = 16;
6113 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6116 stream_.userFormat = format;
6118 // Update wave format structure and buffer information.
6119 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6120 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6121 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6123 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6124 while ( dsPointerLeadTime * 2U > dsBufferSize )
6127 // Setup the secondary DS buffer description.
6128 DSCBUFFERDESC bufferDescription;
6129 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6130 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6131 bufferDescription.dwFlags = 0;
6132 bufferDescription.dwReserved = 0;
6133 bufferDescription.dwBufferBytes = dsBufferSize;
6134 bufferDescription.lpwfxFormat = &waveFormat;
6136 // Create the capture buffer.
6137 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6138 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6139 if ( FAILED( result ) ) {
6141 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6142 errorText_ = errorStream_.str();
6146 // Get the buffer size ... might be different from what we specified.
6148 dscbcaps.dwSize = sizeof( DSCBCAPS );
6149 result = buffer->GetCaps( &dscbcaps );
6150 if ( FAILED( result ) ) {
6153 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6154 errorText_ = errorStream_.str();
6158 dsBufferSize = dscbcaps.dwBufferBytes;
6160 // NOTE: We could have a problem here if this is a duplex stream
6161 // and the play and capture hardware buffer sizes are different
6162 // (I'm actually not sure if that is a problem or not).
6163 // Currently, we are not verifying that.
6165 // Lock the capture buffer
6168 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6169 if ( FAILED( result ) ) {
6172 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6173 errorText_ = errorStream_.str();
6178 ZeroMemory( audioPtr, dataLen );
6180 // Unlock the buffer
6181 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6182 if ( FAILED( result ) ) {
6185 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6186 errorText_ = errorStream_.str();
6190 ohandle = (void *) input;
6191 bhandle = (void *) buffer;
6194 // Set various stream parameters
6195 DsHandle *handle = 0;
6196 stream_.nDeviceChannels[mode] = channels + firstChannel;
6197 stream_.nUserChannels[mode] = channels;
6198 stream_.bufferSize = *bufferSize;
6199 stream_.channelOffset[mode] = firstChannel;
6200 stream_.deviceInterleaved[mode] = true;
6201 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6202 else stream_.userInterleaved = true;
6204 // Set flag for buffer conversion
6205 stream_.doConvertBuffer[mode] = false;
6206 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6207 stream_.doConvertBuffer[mode] = true;
6208 if (stream_.userFormat != stream_.deviceFormat[mode])
6209 stream_.doConvertBuffer[mode] = true;
6210 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6211 stream_.nUserChannels[mode] > 1 )
6212 stream_.doConvertBuffer[mode] = true;
6214 // Allocate necessary internal buffers
6215 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6216 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6217 if ( stream_.userBuffer[mode] == NULL ) {
6218 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6222 if ( stream_.doConvertBuffer[mode] ) {
6224 bool makeBuffer = true;
6225 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6226 if ( mode == INPUT ) {
6227 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6228 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6229 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6234 bufferBytes *= *bufferSize;
6235 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6236 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6237 if ( stream_.deviceBuffer == NULL ) {
6238 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6244 // Allocate our DsHandle structures for the stream.
6245 if ( stream_.apiHandle == 0 ) {
6247 handle = new DsHandle;
6249 catch ( std::bad_alloc& ) {
6250 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6254 // Create a manual-reset event.
6255 handle->condition = CreateEvent( NULL, // no security
6256 TRUE, // manual-reset
6257 FALSE, // non-signaled initially
6259 stream_.apiHandle = (void *) handle;
6262 handle = (DsHandle *) stream_.apiHandle;
6263 handle->id[mode] = ohandle;
6264 handle->buffer[mode] = bhandle;
6265 handle->dsBufferSize[mode] = dsBufferSize;
6266 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6268 stream_.device[mode] = device;
6269 stream_.state = STREAM_STOPPED;
6270 if ( stream_.mode == OUTPUT && mode == INPUT )
6271 // We had already set up an output stream.
6272 stream_.mode = DUPLEX;
6274 stream_.mode = mode;
6275 stream_.nBuffers = nBuffers;
6276 stream_.sampleRate = sampleRate;
6278 // Setup the buffer conversion information structure.
6279 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6281 // Setup the callback thread.
6282 if ( stream_.callbackInfo.isRunning == false ) {
6284 stream_.callbackInfo.isRunning = true;
6285 stream_.callbackInfo.object = (void *) this;
6286 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6287 &stream_.callbackInfo, 0, &threadId );
6288 if ( stream_.callbackInfo.thread == 0 ) {
6289 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6293 // Boost DS thread priority
6294 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6300 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6301 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6302 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6303 if ( buffer ) buffer->Release();
6306 if ( handle->buffer[1] ) {
6307 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6308 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6309 if ( buffer ) buffer->Release();
6312 CloseHandle( handle->condition );
6314 stream_.apiHandle = 0;
6317 for ( int i=0; i<2; i++ ) {
6318 if ( stream_.userBuffer[i] ) {
6319 free( stream_.userBuffer[i] );
6320 stream_.userBuffer[i] = 0;
6324 if ( stream_.deviceBuffer ) {
6325 free( stream_.deviceBuffer );
6326 stream_.deviceBuffer = 0;
6329 stream_.state = STREAM_CLOSED;
// Close the open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, free internal
// buffers, and reset the stream bookkeeping to UNINITIALIZED/CLOSED.
// Emits only a WARNING if no stream is open.
6333 void RtApiDs :: closeStream()
6335 if ( stream_.state == STREAM_CLOSED ) {
6336 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6337 error( RtAudioError::WARNING );
6341 // Stop the callback thread.
// Clearing isRunning makes the callback loop exit; then join and close
// the thread handle created by _beginthreadex in probeDeviceOpen.
6342 stream_.callbackInfo.isRunning = false;
6343 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6344 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6346 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release playback-side DS objects (index 0 = output).
6348 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6349 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6350 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release capture-side DS objects (index 1 = input).
6357 if ( handle->buffer[1] ) {
6358 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6359 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Close the manual-reset drain event and drop the api handle.
6366 CloseHandle( handle->condition );
6368 stream_.apiHandle = 0;
// Free the per-mode user buffers allocated in probeDeviceOpen.
6371 for ( int i=0; i<2; i++ ) {
6372 if ( stream_.userBuffer[i] ) {
6373 free( stream_.userBuffer[i] );
6374 stream_.userBuffer[i] = 0;
6378 if ( stream_.deviceBuffer ) {
6379 free( stream_.deviceBuffer );
6380 stream_.deviceBuffer = 0;
6383 stream_.mode = UNINITIALIZED;
6384 stream_.state = STREAM_CLOSED;
// Start the stream: begin looping playback on the output DS buffer and/or
// looping capture on the input DS buffer, reset drain bookkeeping, and
// mark the stream RUNNING. Emits only a WARNING if already running; DS
// failures are reported as SYSTEM_ERROR at the end.
6387 void RtApiDs :: startStream()
6390 if ( stream_.state == STREAM_RUNNING ) {
6391 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6392 error( RtAudioError::WARNING );
6396   #if defined( HAVE_GETTIMEOFDAY )
// Timestamp stream start for getStreamTime() bookkeeping.
6397 gettimeofday( &stream_.lastTickTimestamp, NULL );
6400 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6402 // Increase scheduler frequency on lesser windows (a side-effect of
6403 // increasing timer accuracy). On greater windows (Win2K or later),
6404 // this is already in effect.
6405 timeBeginPeriod( 1 );
// Reset duplex-synchronization state used by callbackEvent.
6407 buffersRolling = false;
6408 duplexPrerollBytes = 0;
6410 if ( stream_.mode == DUPLEX ) {
6411 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6412 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looping playback on the output secondary buffer.
6416 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6418 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6419 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6420 if ( FAILED( result ) ) {
6421 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6422 errorText_ = errorStream_.str();
// Start looping capture on the input buffer.
6427 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6429 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6430 result = buffer->Start( DSCBSTART_LOOPING );
6431 if ( FAILED( result ) ) {
6432 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6433 errorText_ = errorStream_.str();
// Clear drain state and the manual-reset condition event, then go live.
6438 handle->drainCounter = 0;
6439 handle->internalDrain = false;
6440 ResetEvent( handle->condition );
6441 stream_.state = STREAM_RUNNING;
6444 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the stream. For output/duplex, first waits (via the handle's
// condition event) for the callback to finish draining queued output,
// then stops each DS buffer, zeroes its contents so a restart does not
// replay stale audio, and rewinds the software buffer pointers. Restores
// normal timer resolution (paired with timeBeginPeriod in startStream)
// and reports any DS failure as SYSTEM_ERROR.
6448 void RtApiDs :: stopStream()
6450 if ( stream_.state == STREAM_STOPPED ) {
6451 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6452 error( RtAudioError::WARNING );
6459 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6460 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one (value 2)
// and block until the callback signals the condition event.
6461 if ( handle->drainCounter == 0 ) {
6462 handle->drainCounter = 2;
6463 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6466 stream_.state = STREAM_STOPPED;
6468 MUTEX_LOCK( &stream_.mutex );
6470 // Stop the buffer and clear memory
6471 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6472 result = buffer->Stop();
6473 if ( FAILED( result ) ) {
6474 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6475 errorText_ = errorStream_.str();
6479 // Lock the buffer and clear it so that if we start to play again,
6480 // we won't have old data playing.
6481 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6482 if ( FAILED( result ) ) {
6483 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6484 errorText_ = errorStream_.str();
6488 // Zero the DS buffer
6489 ZeroMemory( audioPtr, dataLen );
6491 // Unlock the DS buffer
6492 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6493 if ( FAILED( result ) ) {
6494 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6495 errorText_ = errorStream_.str();
6499 // If we start playing again, we must begin at beginning of buffer.
6500 handle->bufferPointer[0] = 0;
6503 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6504 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6508 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken by the output branch
// above; only lock here for a pure INPUT stream.
6510 if ( stream_.mode != DUPLEX )
6511 MUTEX_LOCK( &stream_.mutex );
6513 result = buffer->Stop();
6514 if ( FAILED( result ) ) {
6515 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6516 errorText_ = errorStream_.str();
6520 // Lock the buffer and clear it so that if we start to play again,
6521 // we won't have old data playing.
6522 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6523 if ( FAILED( result ) ) {
6524 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6525 errorText_ = errorStream_.str();
6529 // Zero the DS buffer
6530 ZeroMemory( audioPtr, dataLen );
6532 // Unlock the DS buffer
6533 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6534 if ( FAILED( result ) ) {
6535 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6536 errorText_ = errorStream_.str();
6540 // If we start recording again, we must begin at beginning of buffer.
6541 handle->bufferPointer[1] = 0;
6545 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6546 MUTEX_UNLOCK( &stream_.mutex );
6548 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: emits only a WARNING if already stopped; otherwise
// sets drainCounter to 2, the same value stopStream() uses to request an
// immediate drain from the callback (remaining shutdown occurs outside
// the visible lines — TODO confirm it defers to stopStream()).
6551 void RtApiDs :: abortStream()
6554 if ( stream_.state == STREAM_STOPPED ) {
6555 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6556 error( RtAudioError::WARNING );
6560 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6561 handle->drainCounter = 2;
6566 void RtApiDs :: callbackEvent()
6568 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6569 Sleep( 50 ); // sleep 50 milliseconds
6573 if ( stream_.state == STREAM_CLOSED ) {
6574 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6575 error( RtAudioError::WARNING );
6579 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6580 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6582 // Check if we were draining the stream and signal is finished.
6583 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6585 stream_.state = STREAM_STOPPING;
6586 if ( handle->internalDrain == false )
6587 SetEvent( handle->condition );
6593 // Invoke user callback to get fresh output data UNLESS we are
6595 if ( handle->drainCounter == 0 ) {
6596 RtAudioCallback callback = (RtAudioCallback) info->callback;
6597 double streamTime = getStreamTime();
6598 RtAudioStreamStatus status = 0;
6599 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6600 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6601 handle->xrun[0] = false;
6603 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6604 status |= RTAUDIO_INPUT_OVERFLOW;
6605 handle->xrun[1] = false;
6607 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6608 stream_.bufferSize, streamTime, status, info->userData );
6609 if ( cbReturnValue == 2 ) {
6610 stream_.state = STREAM_STOPPING;
6611 handle->drainCounter = 2;
6615 else if ( cbReturnValue == 1 ) {
6616 handle->drainCounter = 1;
6617 handle->internalDrain = true;
6622 DWORD currentWritePointer, safeWritePointer;
6623 DWORD currentReadPointer, safeReadPointer;
6624 UINT nextWritePointer;
6626 LPVOID buffer1 = NULL;
6627 LPVOID buffer2 = NULL;
6628 DWORD bufferSize1 = 0;
6629 DWORD bufferSize2 = 0;
6634 MUTEX_LOCK( &stream_.mutex );
6635 if ( stream_.state == STREAM_STOPPED ) {
6636 MUTEX_UNLOCK( &stream_.mutex );
6640 if ( buffersRolling == false ) {
6641 if ( stream_.mode == DUPLEX ) {
6642 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6644 // It takes a while for the devices to get rolling. As a result,
6645 // there's no guarantee that the capture and write device pointers
6646 // will move in lockstep. Wait here for both devices to start
6647 // rolling, and then set our buffer pointers accordingly.
6648 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6649 // bytes later than the write buffer.
6651 // Stub: a serious risk of having a pre-emptive scheduling round
6652 // take place between the two GetCurrentPosition calls... but I'm
6653 // really not sure how to solve the problem. Temporarily boost to
6654 // Realtime priority, maybe; but I'm not sure what priority the
6655 // DirectSound service threads run at. We *should* be roughly
6656 // within a ms or so of correct.
6658 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6659 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6661 DWORD startSafeWritePointer, startSafeReadPointer;
6663 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6664 if ( FAILED( result ) ) {
6665 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6666 errorText_ = errorStream_.str();
6667 MUTEX_UNLOCK( &stream_.mutex );
6668 error( RtAudioError::SYSTEM_ERROR );
6671 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6672 if ( FAILED( result ) ) {
6673 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6674 errorText_ = errorStream_.str();
6675 MUTEX_UNLOCK( &stream_.mutex );
6676 error( RtAudioError::SYSTEM_ERROR );
6680 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6681 if ( FAILED( result ) ) {
6682 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6683 errorText_ = errorStream_.str();
6684 MUTEX_UNLOCK( &stream_.mutex );
6685 error( RtAudioError::SYSTEM_ERROR );
6688 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6689 if ( FAILED( result ) ) {
6690 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6691 errorText_ = errorStream_.str();
6692 MUTEX_UNLOCK( &stream_.mutex );
6693 error( RtAudioError::SYSTEM_ERROR );
6696 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6700 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6702 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6703 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6704 handle->bufferPointer[1] = safeReadPointer;
6706 else if ( stream_.mode == OUTPUT ) {
6708 // Set the proper nextWritePosition after initial startup.
6709 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6710 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6711 if ( FAILED( result ) ) {
6712 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6713 errorText_ = errorStream_.str();
6714 MUTEX_UNLOCK( &stream_.mutex );
6715 error( RtAudioError::SYSTEM_ERROR );
6718 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6719 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6722 buffersRolling = true;
6725 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6727 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6729 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6730 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6731 bufferBytes *= formatBytes( stream_.userFormat );
6732 memset( stream_.userBuffer[0], 0, bufferBytes );
6735 // Setup parameters and do buffer conversion if necessary.
6736 if ( stream_.doConvertBuffer[0] ) {
6737 buffer = stream_.deviceBuffer;
6738 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6739 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6740 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6743 buffer = stream_.userBuffer[0];
6744 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6745 bufferBytes *= formatBytes( stream_.userFormat );
6748 // No byte swapping necessary in DirectSound implementation.
6750 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6751 // unsigned. So, we need to convert our signed 8-bit data here to
6753 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6754 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6756 DWORD dsBufferSize = handle->dsBufferSize[0];
6757 nextWritePointer = handle->bufferPointer[0];
6759 DWORD endWrite, leadPointer;
6761 // Find out where the read and "safe write" pointers are.
6762 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6763 if ( FAILED( result ) ) {
6764 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6765 errorText_ = errorStream_.str();
6766 MUTEX_UNLOCK( &stream_.mutex );
6767 error( RtAudioError::SYSTEM_ERROR );
6771 // We will copy our output buffer into the region between
6772 // safeWritePointer and leadPointer. If leadPointer is not
6773 // beyond the next endWrite position, wait until it is.
6774 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6775 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6776 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6777 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6778 endWrite = nextWritePointer + bufferBytes;
6780 // Check whether the entire write region is behind the play pointer.
6781 if ( leadPointer >= endWrite ) break;
6783 // If we are here, then we must wait until the leadPointer advances
6784 // beyond the end of our next write region. We use the
6785 // Sleep() function to suspend operation until that happens.
6786 double millis = ( endWrite - leadPointer ) * 1000.0;
6787 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6788 if ( millis < 1.0 ) millis = 1.0;
6789 Sleep( (DWORD) millis );
6792 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6793 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6794 // We've strayed into the forbidden zone ... resync the read pointer.
6795 handle->xrun[0] = true;
6796 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6797 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6798 handle->bufferPointer[0] = nextWritePointer;
6799 endWrite = nextWritePointer + bufferBytes;
6802 // Lock free space in the buffer
6803 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6804 &bufferSize1, &buffer2, &bufferSize2, 0 );
6805 if ( FAILED( result ) ) {
6806 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6807 errorText_ = errorStream_.str();
6808 MUTEX_UNLOCK( &stream_.mutex );
6809 error( RtAudioError::SYSTEM_ERROR );
6813 // Copy our buffer into the DS buffer
6814 CopyMemory( buffer1, buffer, bufferSize1 );
6815 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6817 // Update our buffer offset and unlock sound buffer
6818 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6819 if ( FAILED( result ) ) {
6820 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6821 errorText_ = errorStream_.str();
6822 MUTEX_UNLOCK( &stream_.mutex );
6823 error( RtAudioError::SYSTEM_ERROR );
6826 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6827 handle->bufferPointer[0] = nextWritePointer;
6830 // Don't bother draining input
6831 if ( handle->drainCounter ) {
6832 handle->drainCounter++;
6836 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6838 // Setup parameters.
6839 if ( stream_.doConvertBuffer[1] ) {
6840 buffer = stream_.deviceBuffer;
6841 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6842 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6845 buffer = stream_.userBuffer[1];
6846 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6847 bufferBytes *= formatBytes( stream_.userFormat );
6850 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6851 long nextReadPointer = handle->bufferPointer[1];
6852 DWORD dsBufferSize = handle->dsBufferSize[1];
6854 // Find out where the write and "safe read" pointers are.
6855 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6856 if ( FAILED( result ) ) {
6857 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6858 errorText_ = errorStream_.str();
6859 MUTEX_UNLOCK( &stream_.mutex );
6860 error( RtAudioError::SYSTEM_ERROR );
6864 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6865 DWORD endRead = nextReadPointer + bufferBytes;
6867 // Handling depends on whether we are INPUT or DUPLEX.
6868 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6869 // then a wait here will drag the write pointers into the forbidden zone.
6871 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6872 // it's in a safe position. This causes dropouts, but it seems to be the only
6873 // practical way to sync up the read and write pointers reliably, given the
6874 // very complex relationship between phase and increment of the read and write
6877 // In order to minimize audible dropouts in DUPLEX mode, we will
6878 // provide a pre-roll period of 0.5 seconds in which we return
6879 // zeros from the read buffer while the pointers sync up.
6881 if ( stream_.mode == DUPLEX ) {
6882 if ( safeReadPointer < endRead ) {
6883 if ( duplexPrerollBytes <= 0 ) {
6884 // Pre-roll time over. Be more aggressive.
6885 int adjustment = endRead-safeReadPointer;
6887 handle->xrun[1] = true;
6889 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6890 // and perform fine adjustments later.
6891 // - small adjustments: back off by twice as much.
6892 if ( adjustment >= 2*bufferBytes )
6893 nextReadPointer = safeReadPointer-2*bufferBytes;
6895 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6897 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6901 // In pre-roll time. Just do it.
6902 nextReadPointer = safeReadPointer - bufferBytes;
6903 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6905 endRead = nextReadPointer + bufferBytes;
6908 else { // mode == INPUT
6909 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6910 // See comments for playback.
6911 double millis = (endRead - safeReadPointer) * 1000.0;
6912 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6913 if ( millis < 1.0 ) millis = 1.0;
6914 Sleep( (DWORD) millis );
6916 // Wake up and find out where we are now.
6917 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6918 if ( FAILED( result ) ) {
6919 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6920 errorText_ = errorStream_.str();
6921 MUTEX_UNLOCK( &stream_.mutex );
6922 error( RtAudioError::SYSTEM_ERROR );
6926 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6930 // Lock free space in the buffer
6931 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6932 &bufferSize1, &buffer2, &bufferSize2, 0 );
6933 if ( FAILED( result ) ) {
6934 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6935 errorText_ = errorStream_.str();
6936 MUTEX_UNLOCK( &stream_.mutex );
6937 error( RtAudioError::SYSTEM_ERROR );
6941 if ( duplexPrerollBytes <= 0 ) {
6942 // Copy our buffer into the DS buffer
6943 CopyMemory( buffer, buffer1, bufferSize1 );
6944 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6947 memset( buffer, 0, bufferSize1 );
6948 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6949 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6952 // Update our buffer offset and unlock sound buffer
6953 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6954 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6955 if ( FAILED( result ) ) {
6956 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6957 errorText_ = errorStream_.str();
6958 MUTEX_UNLOCK( &stream_.mutex );
6959 error( RtAudioError::SYSTEM_ERROR );
6962 handle->bufferPointer[1] = nextReadPointer;
6964 // No byte swapping necessary in DirectSound implementation.
6966 // If necessary, convert 8-bit data from unsigned to signed.
6967 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6968 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6970 // Do buffer conversion if necessary.
6971 if ( stream_.doConvertBuffer[1] )
6972 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6976 MUTEX_UNLOCK( &stream_.mutex );
6977 RtApi::tickStreamTime();
6980 // Definitions for utility functions and callbacks
6981 // specific to the DirectSound implementation.
6983 static unsigned __stdcall callbackHandler( void *ptr )
6985 CallbackInfo *info = (CallbackInfo *) ptr;
6986 RtApiDs *object = (RtApiDs *) info->object;
6987 bool* isRunning = &info->isRunning;
6989 while ( *isRunning == true ) {
6990 object->callbackEvent();
6997 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6998 LPCTSTR description,
7002 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7003 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7006 bool validDevice = false;
7007 if ( probeInfo.isInput == true ) {
7009 LPDIRECTSOUNDCAPTURE object;
7011 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7012 if ( hr != DS_OK ) return TRUE;
7014 caps.dwSize = sizeof(caps);
7015 hr = object->GetCaps( &caps );
7016 if ( hr == DS_OK ) {
7017 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7024 LPDIRECTSOUND object;
7025 hr = DirectSoundCreate( lpguid, &object, NULL );
7026 if ( hr != DS_OK ) return TRUE;
7028 caps.dwSize = sizeof(caps);
7029 hr = object->GetCaps( &caps );
7030 if ( hr == DS_OK ) {
7031 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7037 // If good device, then save its name and guid.
7038 std::string name = convertCharPointerToStdString( description );
7039 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7040 if ( lpguid == NULL )
7041 name = "Default Device";
7042 if ( validDevice ) {
7043 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7044 if ( dsDevices[i].name == name ) {
7045 dsDevices[i].found = true;
7046 if ( probeInfo.isInput ) {
7047 dsDevices[i].id[1] = lpguid;
7048 dsDevices[i].validId[1] = true;
7051 dsDevices[i].id[0] = lpguid;
7052 dsDevices[i].validId[0] = true;
7060 device.found = true;
7061 if ( probeInfo.isInput ) {
7062 device.id[1] = lpguid;
7063 device.validId[1] = true;
7066 device.id[0] = lpguid;
7067 device.validId[0] = true;
7069 dsDevices.push_back( device );
7075 static const char* getErrorString( int code )
7079 case DSERR_ALLOCATED:
7080 return "Already allocated";
7082 case DSERR_CONTROLUNAVAIL:
7083 return "Control unavailable";
7085 case DSERR_INVALIDPARAM:
7086 return "Invalid parameter";
7088 case DSERR_INVALIDCALL:
7089 return "Invalid call";
7092 return "Generic error";
7094 case DSERR_PRIOLEVELNEEDED:
7095 return "Priority level needed";
7097 case DSERR_OUTOFMEMORY:
7098 return "Out of memory";
7100 case DSERR_BADFORMAT:
7101 return "The sample rate or the channel format is not supported";
7103 case DSERR_UNSUPPORTED:
7104 return "Not supported";
7106 case DSERR_NODRIVER:
7109 case DSERR_ALREADYINITIALIZED:
7110 return "Already initialized";
7112 case DSERR_NOAGGREGATION:
7113 return "No aggregation";
7115 case DSERR_BUFFERLOST:
7116 return "Buffer lost";
7118 case DSERR_OTHERAPPHASPRIO:
7119 return "Another application already has priority";
7121 case DSERR_UNINITIALIZED:
7122 return "Uninitialized";
7125 return "DirectSound unknown error";
7128 //******************** End of __WINDOWS_DS__ *********************//
7132 #if defined(__LINUX_ALSA__)
7134 #include <alsa/asoundlib.h>
7137 // A structure to hold various information related to the ALSA API
7140 snd_pcm_t *handles[2];
7143 pthread_cond_t runnable_cv;
7147 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7150 static void *alsaCallbackHandler( void * ptr );
7152 RtApiAlsa :: RtApiAlsa()
7154 // Nothing to do here.
7157 RtApiAlsa :: ~RtApiAlsa()
7159 if ( stream_.state != STREAM_CLOSED ) closeStream();
7162 unsigned int RtApiAlsa :: getDeviceCount( void )
7164 unsigned nDevices = 0;
7165 int result, subdevice, card;
7167 snd_ctl_t *handle = 0;
7169 // Count cards and devices
7171 snd_card_next( &card );
7172 while ( card >= 0 ) {
7173 sprintf( name, "hw:%d", card );
7174 result = snd_ctl_open( &handle, name, 0 );
7177 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7178 errorText_ = errorStream_.str();
7179 error( RtAudioError::WARNING );
7184 result = snd_ctl_pcm_next_device( handle, &subdevice );
7186 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7187 errorText_ = errorStream_.str();
7188 error( RtAudioError::WARNING );
7191 if ( subdevice < 0 )
7197 snd_ctl_close( handle );
7198 snd_card_next( &card );
7201 result = snd_ctl_open( &handle, "default", 0 );
7204 snd_ctl_close( handle );
7210 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7212 RtAudio::DeviceInfo info;
7213 info.probed = false;
7215 unsigned nDevices = 0;
7216 int result, subdevice, card;
7218 snd_ctl_t *chandle = 0;
7220 // Count cards and devices
7223 snd_card_next( &card );
7224 while ( card >= 0 ) {
7225 sprintf( name, "hw:%d", card );
7226 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7229 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7230 errorText_ = errorStream_.str();
7231 error( RtAudioError::WARNING );
7236 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7238 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7239 errorText_ = errorStream_.str();
7240 error( RtAudioError::WARNING );
7243 if ( subdevice < 0 ) break;
7244 if ( nDevices == device ) {
7245 sprintf( name, "hw:%d,%d", card, subdevice );
7252 snd_ctl_close( chandle );
7253 snd_card_next( &card );
7256 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7257 if ( result == 0 ) {
7258 if ( nDevices == device ) {
7259 strcpy( name, "default" );
7265 if ( nDevices == 0 ) {
7266 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7267 error( RtAudioError::INVALID_USE );
7271 if ( device >= nDevices ) {
7272 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7273 error( RtAudioError::INVALID_USE );
7279 // If a stream is already open, we cannot probe the stream devices.
7280 // Thus, use the saved results.
7281 if ( stream_.state != STREAM_CLOSED &&
7282 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7283 snd_ctl_close( chandle );
7284 if ( device >= devices_.size() ) {
7285 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7286 error( RtAudioError::WARNING );
7289 return devices_[ device ];
7292 int openMode = SND_PCM_ASYNC;
7293 snd_pcm_stream_t stream;
7294 snd_pcm_info_t *pcminfo;
7295 snd_pcm_info_alloca( &pcminfo );
7297 snd_pcm_hw_params_t *params;
7298 snd_pcm_hw_params_alloca( ¶ms );
7300 // First try for playback unless default device (which has subdev -1)
7301 stream = SND_PCM_STREAM_PLAYBACK;
7302 snd_pcm_info_set_stream( pcminfo, stream );
7303 if ( subdevice != -1 ) {
7304 snd_pcm_info_set_device( pcminfo, subdevice );
7305 snd_pcm_info_set_subdevice( pcminfo, 0 );
7307 result = snd_ctl_pcm_info( chandle, pcminfo );
7309 // Device probably doesn't support playback.
7314 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7316 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7317 errorText_ = errorStream_.str();
7318 error( RtAudioError::WARNING );
7322 // The device is open ... fill the parameter structure.
7323 result = snd_pcm_hw_params_any( phandle, params );
7325 snd_pcm_close( phandle );
7326 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7327 errorText_ = errorStream_.str();
7328 error( RtAudioError::WARNING );
7332 // Get output channel information.
7334 result = snd_pcm_hw_params_get_channels_max( params, &value );
7336 snd_pcm_close( phandle );
7337 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7338 errorText_ = errorStream_.str();
7339 error( RtAudioError::WARNING );
7342 info.outputChannels = value;
7343 snd_pcm_close( phandle );
7346 stream = SND_PCM_STREAM_CAPTURE;
7347 snd_pcm_info_set_stream( pcminfo, stream );
7349 // Now try for capture unless default device (with subdev = -1)
7350 if ( subdevice != -1 ) {
7351 result = snd_ctl_pcm_info( chandle, pcminfo );
7352 snd_ctl_close( chandle );
7354 // Device probably doesn't support capture.
7355 if ( info.outputChannels == 0 ) return info;
7356 goto probeParameters;
7360 snd_ctl_close( chandle );
7362 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7364 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7365 errorText_ = errorStream_.str();
7366 error( RtAudioError::WARNING );
7367 if ( info.outputChannels == 0 ) return info;
7368 goto probeParameters;
7371 // The device is open ... fill the parameter structure.
7372 result = snd_pcm_hw_params_any( phandle, params );
7374 snd_pcm_close( phandle );
7375 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7376 errorText_ = errorStream_.str();
7377 error( RtAudioError::WARNING );
7378 if ( info.outputChannels == 0 ) return info;
7379 goto probeParameters;
7382 result = snd_pcm_hw_params_get_channels_max( params, &value );
7384 snd_pcm_close( phandle );
7385 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7386 errorText_ = errorStream_.str();
7387 error( RtAudioError::WARNING );
7388 if ( info.outputChannels == 0 ) return info;
7389 goto probeParameters;
7391 info.inputChannels = value;
7392 snd_pcm_close( phandle );
7394 // If device opens for both playback and capture, we determine the channels.
7395 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7396 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7398 // ALSA doesn't provide default devices so we'll use the first available one.
7399 if ( device == 0 && info.outputChannels > 0 )
7400 info.isDefaultOutput = true;
7401 if ( device == 0 && info.inputChannels > 0 )
7402 info.isDefaultInput = true;
7405 // At this point, we just need to figure out the supported data
7406 // formats and sample rates. We'll proceed by opening the device in
7407 // the direction with the maximum number of channels, or playback if
7408 // they are equal. This might limit our sample rate options, but so
7411 if ( info.outputChannels >= info.inputChannels )
7412 stream = SND_PCM_STREAM_PLAYBACK;
7414 stream = SND_PCM_STREAM_CAPTURE;
7415 snd_pcm_info_set_stream( pcminfo, stream );
7417 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7419 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7420 errorText_ = errorStream_.str();
7421 error( RtAudioError::WARNING );
7425 // The device is open ... fill the parameter structure.
7426 result = snd_pcm_hw_params_any( phandle, params );
7428 snd_pcm_close( phandle );
7429 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7430 errorText_ = errorStream_.str();
7431 error( RtAudioError::WARNING );
7435 // Test our discrete set of sample rate values.
7436 info.sampleRates.clear();
7437 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7438 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7439 info.sampleRates.push_back( SAMPLE_RATES[i] );
7441 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7442 info.preferredSampleRate = SAMPLE_RATES[i];
7445 if ( info.sampleRates.size() == 0 ) {
7446 snd_pcm_close( phandle );
7447 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7448 errorText_ = errorStream_.str();
7449 error( RtAudioError::WARNING );
7453 // Probe the supported data formats ... we don't care about endian-ness just yet
7454 snd_pcm_format_t format;
7455 info.nativeFormats = 0;
7456 format = SND_PCM_FORMAT_S8;
7457 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7458 info.nativeFormats |= RTAUDIO_SINT8;
7459 format = SND_PCM_FORMAT_S16;
7460 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7461 info.nativeFormats |= RTAUDIO_SINT16;
7462 format = SND_PCM_FORMAT_S24;
7463 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7464 info.nativeFormats |= RTAUDIO_SINT24;
7465 format = SND_PCM_FORMAT_S32;
7466 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7467 info.nativeFormats |= RTAUDIO_SINT32;
7468 format = SND_PCM_FORMAT_FLOAT;
7469 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7470 info.nativeFormats |= RTAUDIO_FLOAT32;
7471 format = SND_PCM_FORMAT_FLOAT64;
7472 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7473 info.nativeFormats |= RTAUDIO_FLOAT64;
7475 // Check that we have at least one supported format
7476 if ( info.nativeFormats == 0 ) {
7477 snd_pcm_close( phandle );
7478 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7479 errorText_ = errorStream_.str();
7480 error( RtAudioError::WARNING );
7484 // Get the device name
7486 result = snd_card_get_name( card, &cardname );
7487 if ( result >= 0 ) {
7488 sprintf( name, "hw:%s,%d", cardname, subdevice );
7493 // That's all ... close the device and return
7494 snd_pcm_close( phandle );
7499 void RtApiAlsa :: saveDeviceInfo( void )
7503 unsigned int nDevices = getDeviceCount();
7504 devices_.resize( nDevices );
7505 for ( unsigned int i=0; i<nDevices; i++ )
7506 devices_[i] = getDeviceInfo( i );
7509 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7510 unsigned int firstChannel, unsigned int sampleRate,
7511 RtAudioFormat format, unsigned int *bufferSize,
7512 RtAudio::StreamOptions *options )
7515 #if defined(__RTAUDIO_DEBUG__)
7517 snd_output_stdio_attach(&out, stderr, 0);
7520 // I'm not using the "plug" interface ... too much inconsistent behavior.
7522 unsigned nDevices = 0;
7523 int result, subdevice, card;
7527 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7528 snprintf(name, sizeof(name), "%s", "default");
7530 // Count cards and devices
7532 snd_card_next( &card );
7533 while ( card >= 0 ) {
7534 sprintf( name, "hw:%d", card );
7535 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7537 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7538 errorText_ = errorStream_.str();
7543 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7544 if ( result < 0 ) break;
7545 if ( subdevice < 0 ) break;
7546 if ( nDevices == device ) {
7547 sprintf( name, "hw:%d,%d", card, subdevice );
7548 snd_ctl_close( chandle );
7553 snd_ctl_close( chandle );
7554 snd_card_next( &card );
7557 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7558 if ( result == 0 ) {
7559 if ( nDevices == device ) {
7560 strcpy( name, "default" );
7561 snd_ctl_close( chandle );
7566 snd_ctl_close( chandle );
7568 if ( nDevices == 0 ) {
7569 // This should not happen because a check is made before this function is called.
7570 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7574 if ( device >= nDevices ) {
7575 // This should not happen because a check is made before this function is called.
7576 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7583 // The getDeviceInfo() function will not work for a device that is
7584 // already open. Thus, we'll probe the system before opening a
7585 // stream and save the results for use by getDeviceInfo().
7586 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7587 this->saveDeviceInfo();
7589 snd_pcm_stream_t stream;
7590 if ( mode == OUTPUT )
7591 stream = SND_PCM_STREAM_PLAYBACK;
7593 stream = SND_PCM_STREAM_CAPTURE;
7596 int openMode = SND_PCM_ASYNC;
7597 result = snd_pcm_open( &phandle, name, stream, openMode );
7599 if ( mode == OUTPUT )
7600 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7602 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7603 errorText_ = errorStream_.str();
7607 // Fill the parameter structure.
7608 snd_pcm_hw_params_t *hw_params;
7609 snd_pcm_hw_params_alloca( &hw_params );
7610 result = snd_pcm_hw_params_any( phandle, hw_params );
7612 snd_pcm_close( phandle );
7613 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7614 errorText_ = errorStream_.str();
7618 #if defined(__RTAUDIO_DEBUG__)
7619 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7620 snd_pcm_hw_params_dump( hw_params, out );
7623 // Set access ... check user preference.
7624 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7625 stream_.userInterleaved = false;
7626 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7628 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7629 stream_.deviceInterleaved[mode] = true;
7632 stream_.deviceInterleaved[mode] = false;
7635 stream_.userInterleaved = true;
7636 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7638 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7639 stream_.deviceInterleaved[mode] = false;
7642 stream_.deviceInterleaved[mode] = true;
7646 snd_pcm_close( phandle );
7647 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7648 errorText_ = errorStream_.str();
7652 // Determine how to set the device format.
7653 stream_.userFormat = format;
7654 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7656 if ( format == RTAUDIO_SINT8 )
7657 deviceFormat = SND_PCM_FORMAT_S8;
7658 else if ( format == RTAUDIO_SINT16 )
7659 deviceFormat = SND_PCM_FORMAT_S16;
7660 else if ( format == RTAUDIO_SINT24 )
7661 deviceFormat = SND_PCM_FORMAT_S24;
7662 else if ( format == RTAUDIO_SINT32 )
7663 deviceFormat = SND_PCM_FORMAT_S32;
7664 else if ( format == RTAUDIO_FLOAT32 )
7665 deviceFormat = SND_PCM_FORMAT_FLOAT;
7666 else if ( format == RTAUDIO_FLOAT64 )
7667 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7669 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7670 stream_.deviceFormat[mode] = format;
7674 // The user requested format is not natively supported by the device.
7675 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7676 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7677 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7681 deviceFormat = SND_PCM_FORMAT_FLOAT;
7682 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7683 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7687 deviceFormat = SND_PCM_FORMAT_S32;
7688 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7689 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7693 deviceFormat = SND_PCM_FORMAT_S24;
7694 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7695 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7699 deviceFormat = SND_PCM_FORMAT_S16;
7700 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7701 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7705 deviceFormat = SND_PCM_FORMAT_S8;
7706 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7707 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7711 // If we get here, no supported format was found.
7712 snd_pcm_close( phandle );
7713 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7714 errorText_ = errorStream_.str();
7718 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7720 snd_pcm_close( phandle );
7721 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7722 errorText_ = errorStream_.str();
7726 // Determine whether byte-swaping is necessary.
7727 stream_.doByteSwap[mode] = false;
7728 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7729 result = snd_pcm_format_cpu_endian( deviceFormat );
7731 stream_.doByteSwap[mode] = true;
7732 else if (result < 0) {
7733 snd_pcm_close( phandle );
7734 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7735 errorText_ = errorStream_.str();
7740 // Set the sample rate.
7741 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7743 snd_pcm_close( phandle );
7744 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7745 errorText_ = errorStream_.str();
7749 // Determine the number of channels for this device. We support a possible
7750 // minimum device channel number > than the value requested by the user.
7751 stream_.nUserChannels[mode] = channels;
7753 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7754 unsigned int deviceChannels = value;
7755 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7756 snd_pcm_close( phandle );
7757 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7758 errorText_ = errorStream_.str();
7762 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7764 snd_pcm_close( phandle );
7765 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7766 errorText_ = errorStream_.str();
7769 deviceChannels = value;
7770 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7771 stream_.nDeviceChannels[mode] = deviceChannels;
7773 // Set the device channels.
7774 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7776 snd_pcm_close( phandle );
7777 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7778 errorText_ = errorStream_.str();
7782 // Set the buffer (or period) size.
7784 snd_pcm_uframes_t periodSize = *bufferSize;
7785 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7787 snd_pcm_close( phandle );
7788 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7789 errorText_ = errorStream_.str();
7792 *bufferSize = periodSize;
7794 // Set the buffer number, which in ALSA is referred to as the "period".
7795 unsigned int periods = 0;
7796 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7797 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7798 if ( periods < 2 ) periods = 4; // a fairly safe default value
7799 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7801 snd_pcm_close( phandle );
7802 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7803 errorText_ = errorStream_.str();
7807 // If attempting to setup a duplex stream, the bufferSize parameter
7808 // MUST be the same in both directions!
7809 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7810 snd_pcm_close( phandle );
7811 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7812 errorText_ = errorStream_.str();
7816 stream_.bufferSize = *bufferSize;
7818 // Install the hardware configuration
7819 result = snd_pcm_hw_params( phandle, hw_params );
7821 snd_pcm_close( phandle );
7822 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7823 errorText_ = errorStream_.str();
7827 #if defined(__RTAUDIO_DEBUG__)
7828 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7829 snd_pcm_hw_params_dump( hw_params, out );
7832 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7833 snd_pcm_sw_params_t *sw_params = NULL;
7834 snd_pcm_sw_params_alloca( &sw_params );
7835 snd_pcm_sw_params_current( phandle, sw_params );
7836 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7837 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7838 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7840 // The following two settings were suggested by Theo Veenker
7841 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7842 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7844 // here are two options for a fix
7845 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7846 snd_pcm_uframes_t val;
7847 snd_pcm_sw_params_get_boundary( sw_params, &val );
7848 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7850 result = snd_pcm_sw_params( phandle, sw_params );
7852 snd_pcm_close( phandle );
7853 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7854 errorText_ = errorStream_.str();
7858 #if defined(__RTAUDIO_DEBUG__)
7859 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7860 snd_pcm_sw_params_dump( sw_params, out );
7863 // Set flags for buffer conversion
7864 stream_.doConvertBuffer[mode] = false;
7865 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7866 stream_.doConvertBuffer[mode] = true;
7867 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7868 stream_.doConvertBuffer[mode] = true;
7869 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7870 stream_.nUserChannels[mode] > 1 )
7871 stream_.doConvertBuffer[mode] = true;
7873 // Allocate the ApiHandle if necessary and then save.
7874 AlsaHandle *apiInfo = 0;
7875 if ( stream_.apiHandle == 0 ) {
7877 apiInfo = (AlsaHandle *) new AlsaHandle;
7879 catch ( std::bad_alloc& ) {
7880 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7884 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7885 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7889 stream_.apiHandle = (void *) apiInfo;
7890 apiInfo->handles[0] = 0;
7891 apiInfo->handles[1] = 0;
7894 apiInfo = (AlsaHandle *) stream_.apiHandle;
7896 apiInfo->handles[mode] = phandle;
7899 // Allocate necessary internal buffers.
7900 unsigned long bufferBytes;
7901 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7902 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7903 if ( stream_.userBuffer[mode] == NULL ) {
7904 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7908 if ( stream_.doConvertBuffer[mode] ) {
7910 bool makeBuffer = true;
7911 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7912 if ( mode == INPUT ) {
7913 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7914 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7915 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7920 bufferBytes *= *bufferSize;
7921 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7922 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7923 if ( stream_.deviceBuffer == NULL ) {
7924 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7930 stream_.sampleRate = sampleRate;
7931 stream_.nBuffers = periods;
7932 stream_.device[mode] = device;
7933 stream_.state = STREAM_STOPPED;
7935 // Setup the buffer conversion information structure.
7936 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7938 // Setup thread if necessary.
7939 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7940 // We had already set up an output stream.
7941 stream_.mode = DUPLEX;
7942 // Link the streams if possible.
7943 apiInfo->synchronized = false;
7944 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7945 apiInfo->synchronized = true;
7947 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7948 error( RtAudioError::WARNING );
7952 stream_.mode = mode;
7954 // Setup callback thread.
7955 stream_.callbackInfo.object = (void *) this;
7957 // Set the thread attributes for joinable and realtime scheduling
7958 // priority (optional). The higher priority will only take affect
7959 // if the program is run as root or suid. Note, under Linux
7960 // processes with CAP_SYS_NICE privilege, a user can change
7961 // scheduling policy and priority (thus need not be root). See
7962 // POSIX "capabilities".
7963 pthread_attr_t attr;
7964 pthread_attr_init( &attr );
7965 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7966 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7967 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7968 stream_.callbackInfo.doRealtime = true;
7969 struct sched_param param;
7970 int priority = options->priority;
7971 int min = sched_get_priority_min( SCHED_RR );
7972 int max = sched_get_priority_max( SCHED_RR );
7973 if ( priority < min ) priority = min;
7974 else if ( priority > max ) priority = max;
7975 param.sched_priority = priority;
7977 // Set the policy BEFORE the priority. Otherwise it fails.
7978 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7979 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7980 // This is definitely required. Otherwise it fails.
7981 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7982 pthread_attr_setschedparam(&attr, ¶m);
7985 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7987 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7990 stream_.callbackInfo.isRunning = true;
7991 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7992 pthread_attr_destroy( &attr );
7994 // Failed. Try instead with default attributes.
7995 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7997 stream_.callbackInfo.isRunning = false;
7998 errorText_ = "RtApiAlsa::error creating callback thread!";
8008 pthread_cond_destroy( &apiInfo->runnable_cv );
8009 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8010 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8012 stream_.apiHandle = 0;
8015 if ( phandle) snd_pcm_close( phandle );
8017 for ( int i=0; i<2; i++ ) {
8018 if ( stream_.userBuffer[i] ) {
8019 free( stream_.userBuffer[i] );
8020 stream_.userBuffer[i] = 0;
8024 if ( stream_.deviceBuffer ) {
8025 free( stream_.deviceBuffer );
8026 stream_.deviceBuffer = 0;
8029 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: wake and join the callback thread, abort any
// in-flight PCM transfer, close both PCM handles and free all buffers.
// NOTE(review): this listing's own line numbers are discontinuous (e.g. 8034,
// 8038-8040, 8057-8059 missing), so braces/returns are elided from view here.
8033 void RtApiAlsa :: closeStream()
8035 if ( stream_.state == STREAM_CLOSED ) {
8036 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8037 error( RtAudioError::WARNING );
// Tell the callback thread to terminate, then release it in case it is
// blocked on runnable_cv (it waits there while the stream is stopped).
8041 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8042 stream_.callbackInfo.isRunning = false;
8043 MUTEX_LOCK( &stream_.mutex );
8044 if ( stream_.state == STREAM_STOPPED ) {
8045 apiInfo->runnable = true;
8046 pthread_cond_signal( &apiInfo->runnable_cv );
8048 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to exit before tearing down its resources.
8049 pthread_join( stream_.callbackInfo.thread, NULL );
// If the stream was still running, drop (discard, not drain) pending frames
// on whichever directions are active; handles[0] = playback, [1] = capture.
8051 if ( stream_.state == STREAM_RUNNING ) {
8052 stream_.state = STREAM_STOPPED;
8053 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8054 snd_pcm_drop( apiInfo->handles[0] );
8055 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8056 snd_pcm_drop( apiInfo->handles[1] );
// Destroy the condition variable and close any open PCM handles.
8060 pthread_cond_destroy( &apiInfo->runnable_cv );
8061 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8062 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8064 stream_.apiHandle = 0;
// Release the per-direction user buffers and the shared device buffer.
8067 for ( int i=0; i<2; i++ ) {
8068 if ( stream_.userBuffer[i] ) {
8069 free( stream_.userBuffer[i] );
8070 stream_.userBuffer[i] = 0;
8074 if ( stream_.deviceBuffer ) {
8075 free( stream_.deviceBuffer );
8076 stream_.deviceBuffer = 0;
// Mark the stream object as fully closed/uninitialized.
8079 stream_.mode = UNINITIALIZED;
8080 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: (re)prepare the PCM device(s) if needed, mark
// the stream running, and wake the callback thread waiting on runnable_cv.
// NOTE(review): the listing's line numbers jump (e.g. 8108, 8111-8115 missing),
// so error-return branches and braces are elided from view in this block.
8083 void RtApiAlsa :: startStream()
8085 // This method calls snd_pcm_prepare if the device isn't already in that state.
8088 if ( stream_.state == STREAM_RUNNING ) {
8089 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8090 error( RtAudioError::WARNING );
8094 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time tick reference when a high-resolution clock exists.
8096 #if defined( HAVE_GETTIMEOFDAY )
8097 gettimeofday( &stream_.lastTickTimestamp, NULL );
8101 snd_pcm_state_t state;
8102 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8103 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback PCM (handle[0]) unless it is already prepared.
8104 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8105 state = snd_pcm_state( handle[0] );
8106 if ( state != SND_PCM_STATE_PREPARED ) {
8107 result = snd_pcm_prepare( handle[0] );
8109 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8110 errorText_ = errorStream_.str();
// For capture (handle[1]), only when not hardware-linked to playback:
// first drop frames captured while the device sat open, then prepare.
8116 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8117 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8118 state = snd_pcm_state( handle[1] );
8119 if ( state != SND_PCM_STATE_PREPARED ) {
8120 result = snd_pcm_prepare( handle[1] );
8122 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8123 errorText_ = errorStream_.str();
8129 stream_.state = STREAM_RUNNING;
// Release the callback thread, which blocks on runnable_cv while stopped.
8132 apiInfo->runnable = true;
8133 pthread_cond_signal( &apiInfo->runnable_cv );
8134 MUTEX_UNLOCK( &stream_.mutex );
// A negative ALSA result falls through to a SYSTEM_ERROR report.
8136 if ( result >= 0 ) return;
8137 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain playback (play out buffered
// frames) and drop capture, then park the callback thread.
// NOTE(review): listing line numbers jump (8158, 8160, 8163-8166, ... missing);
// the else-branches/braces around drop-vs-drain are elided from view.
8140 void RtApiAlsa :: stopStream()
8143 if ( stream_.state == STREAM_STOPPED ) {
8144 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8145 error( RtAudioError::WARNING );
// Flip the state first so the callback thread sees STOPPED and parks itself.
8149 stream_.state = STREAM_STOPPED;
8150 MUTEX_LOCK( &stream_.mutex );
8153 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8154 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8155 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// When playback/capture are hardware-linked, snd_pcm_drain on the linked
// pair can block; drop instead. Otherwise drain to flush queued output.
8156 if ( apiInfo->synchronized )
8157 result = snd_pcm_drop( handle[0] );
8159 result = snd_pcm_drain( handle[0] );
8161 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8162 errorText_ = errorStream_.str();
// Capture has nothing worth draining; discard pending frames.
8167 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8168 result = snd_pcm_drop( handle[1] );
8170 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8171 errorText_ = errorStream_.str();
8177 apiInfo->runnable = false; // fixes high CPU usage when stopped
8178 MUTEX_UNLOCK( &stream_.mutex );
8180 if ( result >= 0 ) return;
8181 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream immediately: unlike stopStream(), playback is
// always dropped (pending output frames are discarded, never drained).
// NOTE(review): listing line numbers jump (8201, 8204-8207, ... missing), so
// error-check guards and braces are elided from view in this block.
8184 void RtApiAlsa :: abortStream()
8187 if ( stream_.state == STREAM_STOPPED ) {
8188 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8189 error( RtAudioError::WARNING );
8193 stream_.state = STREAM_STOPPED;
8194 MUTEX_LOCK( &stream_.mutex );
8197 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8198 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Discard queued playback frames immediately (handle[0] = playback).
8199 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8200 result = snd_pcm_drop( handle[0] );
8202 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8203 errorText_ = errorStream_.str();
// Capture side dropped separately only when not hardware-linked to playback.
8208 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8209 result = snd_pcm_drop( handle[1] );
8211 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8212 errorText_ = errorStream_.str();
8218 apiInfo->runnable = false; // fixes high CPU usage when stopped
8219 MUTEX_UNLOCK( &stream_.mutex );
8221 if ( result >= 0 ) return;
8222 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then read capture data and/or write playback data, handling
// xruns (-EPIPE) by flagging them and re-preparing the device.
// NOTE(review): listing line numbers are discontinuous throughout this block
// (declarations of `buffer`, `channels`, `result`, the `unlock:` label, and
// many braces/else lines are elided from view).
8225 void RtApiAlsa :: callbackEvent()
8227 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on runnable_cv until startStream()/closeStream()
// signals; bail out if the stream did not transition to RUNNING.
8228 if ( stream_.state == STREAM_STOPPED ) {
8229 MUTEX_LOCK( &stream_.mutex );
8230 while ( !apiInfo->runnable )
8231 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8233 if ( stream_.state != STREAM_RUNNING ) {
8234 MUTEX_UNLOCK( &stream_.mutex );
8237 MUTEX_UNLOCK( &stream_.mutex );
8240 if ( stream_.state == STREAM_CLOSED ) {
8241 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8242 error( RtAudioError::WARNING );
// Build the status flags from any xrun recorded by a previous iteration
// (xrun[0] = output underflow, xrun[1] = input overflow), then run the
// user callback. Index 0 = OUTPUT, index 1 = INPUT throughout.
8246 int doStopStream = 0;
8247 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8248 double streamTime = getStreamTime();
8249 RtAudioStreamStatus status = 0;
8250 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8251 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8252 apiInfo->xrun[0] = false;
8254 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8255 status |= RTAUDIO_INPUT_OVERFLOW;
8256 apiInfo->xrun[1] = false;
8258 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8259 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// doStopStream == 2 requests an immediate abort (handled in elided lines).
8261 if ( doStopStream == 2 ) {
8266 MUTEX_LOCK( &stream_.mutex );
8268 // The state might change while waiting on a mutex.
8269 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8275 snd_pcm_sframes_t frames;
8276 RtAudioFormat format;
8277 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side -----------------------------------------------------
8279 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8281 // Setup parameters.
// Read into the device buffer when a conversion pass is needed,
// otherwise directly into the user buffer.
8282 if ( stream_.doConvertBuffer[1] ) {
8283 buffer = stream_.deviceBuffer;
8284 channels = stream_.nDeviceChannels[1];
8285 format = stream_.deviceFormat[1];
8288 buffer = stream_.userBuffer[1];
8289 channels = stream_.nUserChannels[1];
8290 format = stream_.userFormat;
8293 // Read samples from device in interleaved/non-interleaved format.
8294 if ( stream_.deviceInterleaved[1] )
8295 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one channel-plane pointer per channel.
8297 void *bufs[channels];
8298 size_t offset = stream_.bufferSize * formatBytes( format );
8299 for ( int i=0; i<channels; i++ )
8300 bufs[i] = (void *) (buffer + (i * offset));
8301 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// Short read => error or overrun; -EPIPE in XRUN state is recoverable:
// flag it for the next callback and re-prepare the device.
8304 if ( result < (int) stream_.bufferSize ) {
8305 // Either an error or overrun occured.
8306 if ( result == -EPIPE ) {
8307 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8308 if ( state == SND_PCM_STATE_XRUN ) {
8309 apiInfo->xrun[1] = true;
8310 result = snd_pcm_prepare( handle[1] );
8312 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8313 errorText_ = errorStream_.str();
8317 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8318 errorText_ = errorStream_.str();
8322 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8323 errorText_ = errorStream_.str();
8325 error( RtAudioError::WARNING );
8329 // Do byte swapping if necessary.
8330 if ( stream_.doByteSwap[1] )
8331 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8333 // Do buffer conversion if necessary.
8334 if ( stream_.doConvertBuffer[1] )
8335 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8337 // Check stream latency
8338 result = snd_pcm_delay( handle[1], &frames );
8339 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side ----------------------------------------------------
8344 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8346 // Setup parameters and do buffer conversion if necessary.
8347 if ( stream_.doConvertBuffer[0] ) {
8348 buffer = stream_.deviceBuffer;
8349 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8350 channels = stream_.nDeviceChannels[0];
8351 format = stream_.deviceFormat[0];
8354 buffer = stream_.userBuffer[0];
8355 channels = stream_.nUserChannels[0];
8356 format = stream_.userFormat;
8359 // Do byte swapping if necessary.
8360 if ( stream_.doByteSwap[0] )
8361 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8363 // Write samples to device in interleaved/non-interleaved format.
8364 if ( stream_.deviceInterleaved[0] )
8365 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8367 void *bufs[channels];
8368 size_t offset = stream_.bufferSize * formatBytes( format );
8369 for ( int i=0; i<channels; i++ )
8370 bufs[i] = (void *) (buffer + (i * offset));
8371 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// Short write => underrun or error; mirror of the capture recovery path.
8374 if ( result < (int) stream_.bufferSize ) {
8375 // Either an error or underrun occured.
8376 if ( result == -EPIPE ) {
8377 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8378 if ( state == SND_PCM_STATE_XRUN ) {
8379 apiInfo->xrun[0] = true;
8380 result = snd_pcm_prepare( handle[0] );
8382 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8383 errorText_ = errorStream_.str();
8386 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8389 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8390 errorText_ = errorStream_.str();
8394 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8395 errorText_ = errorStream_.str();
8397 error( RtAudioError::WARNING );
8401 // Check stream latency
8402 result = snd_pcm_delay( handle[0], &frames );
8403 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
// Advance the stream clock; honor a deferred stop request (doStopStream==1).
8407 MUTEX_UNLOCK( &stream_.mutex );
8409 RtApi::tickStreamTime();
8410 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread (created in
// probeDeviceOpen). Loops calling callbackEvent() until isRunning is
// cleared by closeStream(); pthread_testcancel() provides a cancel point.
8413 static void *alsaCallbackHandler( void *ptr )
8415 CallbackInfo *info = (CallbackInfo *) ptr;
8416 RtApiAlsa *object = (RtApiAlsa *) info->object;
8417 bool *isRunning = &info->isRunning;
// If realtime scheduling was requested, report whether SCHED_RR actually
// took effect (it is silently denied without sufficient privilege).
8419 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8420 if ( info->doRealtime ) {
8421 std::cerr << "RtAudio alsa: " <<
8422 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8423 "running realtime scheduling" << std::endl;
8427 while ( *isRunning == true ) {
8428 pthread_testcancel();
8429 object->callbackEvent();
8432 pthread_exit( NULL );
8435 //******************** End of __LINUX_ALSA__ *********************//
8438 #if defined(__LINUX_PULSE__)
8440 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8441 // and Tristan Matthews.
8443 #include <pulse/error.h>
8444 #include <pulse/simple.h>
// Zero-terminated list of sample rates the PulseAudio backend advertises
// (iterated in getDeviceInfo and validated in probeDeviceOpen).
8447 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8448 44100, 48000, 96000, 0};
// Mapping between RtAudio sample formats and the corresponding PulseAudio
// sample formats. The table below is terminated by {0, PA_SAMPLE_INVALID};
// probeDeviceOpen scans it and falls back to FLOAT32LE with internal
// conversion when the requested format has no direct mapping.
8450 struct rtaudio_pa_format_mapping_t {
8451 RtAudioFormat rtaudio_format;
8452 pa_sample_format_t pa_format;
8455 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8456 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8457 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8458 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8459 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend. The constructor initializes
// s_play/s_rec (pa_simple playback/record connections) and the runnable flag;
// their declarations, plus the callback thread handle used elsewhere as
// pah->thread, sit on lines elided from this listing (8462-8464, 8466).
8461 struct PulseAudioHandle {
8465 pthread_cond_t runnable_cv;
8467 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure the stream is closed before destruction. The call made
// under this condition (presumably closeStream()) is on a line elided from
// this listing (8473) — TODO confirm against the canonical source.
8470 RtApiPulse::~RtApiPulse()
8472 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio backend. The body is elided from this
// listing (lines 8477-8479); the backend models a single virtual device
// (getDeviceInfo ignores its index and probeDeviceOpen rejects device != 0),
// so this presumably returns 1 — TODO confirm against the canonical source.
8476 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single virtual "PulseAudio" device: stereo in/out/duplex,
// default for both directions, with the rates from SUPPORTED_SAMPLERATES
// and the three natively-mapped sample formats. The device index is ignored.
8481 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8483 RtAudio::DeviceInfo info;
8485 info.name = "PulseAudio";
8486 info.outputChannels = 2;
8487 info.inputChannels = 2;
8488 info.duplexChannels = 2;
8489 info.isDefaultOutput = true;
8490 info.isDefaultInput = true;
// SUPPORTED_SAMPLERATES is zero-terminated; copy every listed rate.
8492 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8493 info.sampleRates.push_back( *sr );
8495 info.preferredSampleRate = 48000;
8496 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread. Loops calling
// callbackEvent() until cbi->isRunning is cleared by closeStream();
// pthread_testcancel() provides a cancellation point each iteration.
8501 static void *pulseaudio_callback( void * user )
8503 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8504 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8505 volatile bool *isRunning = &cbi->isRunning;
// If realtime scheduling was requested, report whether SCHED_RR actually
// took effect (it is silently denied without sufficient privilege).
8507 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8508 if (cbi->doRealtime) {
8509 std::cerr << "RtAudio pulse: " <<
8510 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8511 "running realtime scheduling" << std::endl;
8515 while ( *isRunning ) {
8516 pthread_testcancel();
8517 context->callbackEvent();
8520 pthread_exit( NULL );
// Close an open PulseAudio stream: stop and join the callback thread, flush
// and free the pa_simple connections, and release the stream buffers.
// NOTE(review): listing line numbers jump (8524, 8526, 8528, 8533, 8535,
// 8540-8541, ... missing), so braces and some guards are elided from view.
8523 void RtApiPulse::closeStream( void )
8525 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Tell the callback thread to exit, and release it if it is parked on
// runnable_cv (it waits there while the stream is stopped).
8527 stream_.callbackInfo.isRunning = false;
8529 MUTEX_LOCK( &stream_.mutex );
8530 if ( stream_.state == STREAM_STOPPED ) {
8531 pah->runnable = true;
8532 pthread_cond_signal( &pah->runnable_cv );
8534 MUTEX_UNLOCK( &stream_.mutex );
8536 pthread_join( pah->thread, 0 );
// Discard any queued playback audio, then free both pa_simple connections.
8537 if ( pah->s_play ) {
8538 pa_simple_flush( pah->s_play, NULL );
8539 pa_simple_free( pah->s_play );
8542 pa_simple_free( pah->s_rec );
8544 pthread_cond_destroy( &pah->runnable_cv );
8546 stream_.apiHandle = 0;
// Free the user buffers for both directions (0 = output, 1 = input).
8549 if ( stream_.userBuffer[0] ) {
8550 free( stream_.userBuffer[0] );
8551 stream_.userBuffer[0] = 0;
8553 if ( stream_.userBuffer[1] ) {
8554 free( stream_.userBuffer[1] );
8555 stream_.userBuffer[1] = 0;
8558 stream_.state = STREAM_CLOSED;
8559 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped, invoke
// the user callback, then push playback audio with pa_simple_write and pull
// capture audio with pa_simple_read (both blocking), converting formats as
// configured. NOTE(review): listing line numbers are discontinuous here
// (declarations of `bytes`/`pa_error`, the abort branch for doStopStream==2,
// and several braces are elided from view).
8562 void RtApiPulse::callbackEvent( void )
8564 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on runnable_cv; bail out unless the stream has
// transitioned to RUNNING when we are released.
8566 if ( stream_.state == STREAM_STOPPED ) {
8567 MUTEX_LOCK( &stream_.mutex );
8568 while ( !pah->runnable )
8569 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8571 if ( stream_.state != STREAM_RUNNING ) {
8572 MUTEX_UNLOCK( &stream_.mutex );
8575 MUTEX_UNLOCK( &stream_.mutex );
8578 if ( stream_.state == STREAM_CLOSED ) {
8579 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8580 "this shouldn't happen!";
8581 error( RtAudioError::WARNING );
// Run the user callback on the user-format buffers.
8585 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8586 double streamTime = getStreamTime();
8587 RtAudioStreamStatus status = 0;
8588 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8589 stream_.bufferSize, streamTime, status,
8590 stream_.callbackInfo.userData );
// doStopStream == 2 requests an immediate abort (handled in elided lines).
8592 if ( doStopStream == 2 ) {
8597 MUTEX_LOCK( &stream_.mutex );
// Pulse transfers use the device buffer when a conversion pass is needed,
// otherwise the user buffer directly.
8598 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8599 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
// Re-check after acquiring the mutex: the state may have changed.
8601 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed), then blocking write to the server.
8606 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8607 if ( stream_.doConvertBuffer[OUTPUT] ) {
8608 convertBuffer( stream_.deviceBuffer,
8609 stream_.userBuffer[OUTPUT],
8610 stream_.convertInfo[OUTPUT] );
8611 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8612 formatBytes( stream_.deviceFormat[OUTPUT] );
8614 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8615 formatBytes( stream_.userFormat );
8617 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8618 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8619 pa_strerror( pa_error ) << ".";
8620 errorText_ = errorStream_.str();
8621 error( RtAudioError::WARNING );
// ---- Capture: blocking read from the server, then convert (if needed).
8625 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8626 if ( stream_.doConvertBuffer[INPUT] )
8627 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8628 formatBytes( stream_.deviceFormat[INPUT] );
8630 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8631 formatBytes( stream_.userFormat );
8633 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8634 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8635 pa_strerror( pa_error ) << ".";
8636 errorText_ = errorStream_.str();
8637 error( RtAudioError::WARNING );
8639 if ( stream_.doConvertBuffer[INPUT] ) {
8640 convertBuffer( stream_.userBuffer[INPUT],
8641 stream_.deviceBuffer,
8642 stream_.convertInfo[INPUT] );
// Advance the stream clock; honor a deferred stop request (doStopStream==1).
8647 MUTEX_UNLOCK( &stream_.mutex );
8648 RtApi::tickStreamTime();
8650 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it running and wake the callback
// thread parked on runnable_cv. Unlike the ALSA path there is no device
// prepare step — pa_simple connections are always ready to transfer.
8654 void RtApiPulse::startStream( void )
8656 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8658 if ( stream_.state == STREAM_CLOSED ) {
8659 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8660 error( RtAudioError::INVALID_USE );
8663 if ( stream_.state == STREAM_RUNNING ) {
8664 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8665 error( RtAudioError::WARNING );
8669 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time tick reference when a high-resolution clock exists.
8671 #if defined( HAVE_GETTIMEOFDAY )
8672 gettimeofday( &stream_.lastTickTimestamp, NULL );
8675 stream_.state = STREAM_RUNNING;
// Release the callback thread, which blocks on runnable_cv while stopped.
8677 pah->runnable = true;
8678 pthread_cond_signal( &pah->runnable_cv );
8679 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: drain queued playback audio
// (lets it finish playing) before marking the stream stopped.
// NOTE(review): listing line numbers jump (8699, 8701, 8708-8711 missing),
// so the pa_error declaration and closing braces are elided from view.
8684 void RtApiPulse::stopStream( void )
8686 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8688 if ( stream_.state == STREAM_CLOSED ) {
8689 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8690 error( RtAudioError::INVALID_USE );
8691 if ( stream_.state == STREAM_STOPPED ) {
8692 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8693 error( RtAudioError::WARNING );
// Flip the state first so the callback thread parks itself.
8697 stream_.state = STREAM_STOPPED;
8698 MUTEX_LOCK( &stream_.mutex );
// Drain (not flush): queued output plays out before we return.
8700 if ( pah && pah->s_play ) {
8702 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8703 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8704 pa_strerror( pa_error ) << ".";
8705 errorText_ = errorStream_.str();
8706 MUTEX_UNLOCK( &stream_.mutex );
8707 error( RtAudioError::SYSTEM_ERROR );
8712 stream_.state = STREAM_STOPPED;
8713 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream: unlike stopStream(), queued playback
// audio is flushed (discarded) instead of drained.
// NOTE(review): listing line numbers jump (8733, 8735, 8742-8745 missing),
// so the pa_error declaration and closing braces are elided from view.
8716 void RtApiPulse::abortStream( void )
8718 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8720 if ( stream_.state == STREAM_CLOSED ) {
8721 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8722 error( RtAudioError::INVALID_USE );
8725 if ( stream_.state == STREAM_STOPPED ) {
8726 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8727 error( RtAudioError::WARNING );
// Flip the state first so the callback thread parks itself.
8731 stream_.state = STREAM_STOPPED;
8732 MUTEX_LOCK( &stream_.mutex );
// Flush (not drain): pending output is thrown away immediately.
8734 if ( pah && pah->s_play ) {
8736 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8737 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8738 pa_strerror( pa_error ) << ".";
8739 errorText_ = errorStream_.str();
8740 MUTEX_UNLOCK( &stream_.mutex );
8741 error( RtAudioError::SYSTEM_ERROR );
8746 stream_.state = STREAM_STOPPED;
8747 MUTEX_UNLOCK( &stream_.mutex );
8750 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8751 unsigned int channels, unsigned int firstChannel,
8752 unsigned int sampleRate, RtAudioFormat format,
8753 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8755 PulseAudioHandle *pah = 0;
8756 unsigned long bufferBytes = 0;
8759 if ( device != 0 ) return false;
8760 if ( mode != INPUT && mode != OUTPUT ) return false;
8761 if ( channels != 1 && channels != 2 ) {
8762 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8765 ss.channels = channels;
8767 if ( firstChannel != 0 ) return false;
8769 bool sr_found = false;
8770 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8771 if ( sampleRate == *sr ) {
8773 stream_.sampleRate = sampleRate;
8774 ss.rate = sampleRate;
8779 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8784 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8785 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8786 if ( format == sf->rtaudio_format ) {
8788 stream_.userFormat = sf->rtaudio_format;
8789 stream_.deviceFormat[mode] = stream_.userFormat;
8790 ss.format = sf->pa_format;
8794 if ( !sf_found ) { // Use internal data format conversion.
8795 stream_.userFormat = format;
8796 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8797 ss.format = PA_SAMPLE_FLOAT32LE;
8800 // Set other stream parameters.
8801 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8802 else stream_.userInterleaved = true;
8803 stream_.deviceInterleaved[mode] = true;
8804 stream_.nBuffers = 1;
8805 stream_.doByteSwap[mode] = false;
8806 stream_.nUserChannels[mode] = channels;
8807 stream_.nDeviceChannels[mode] = channels + firstChannel;
8808 stream_.channelOffset[mode] = 0;
8809 std::string streamName = "RtAudio";
8811 // Set flags for buffer conversion.
8812 stream_.doConvertBuffer[mode] = false;
8813 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8814 stream_.doConvertBuffer[mode] = true;
8815 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8816 stream_.doConvertBuffer[mode] = true;
8817 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] )
8818 stream_.doConvertBuffer[mode] = true;
8820 // Allocate necessary internal buffers.
8821 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8822 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8823 if ( stream_.userBuffer[mode] == NULL ) {
8824 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8827 stream_.bufferSize = *bufferSize;
8829 if ( stream_.doConvertBuffer[mode] ) {
8831 bool makeBuffer = true;
8832 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8833 if ( mode == INPUT ) {
8834 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8835 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8836 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8841 bufferBytes *= *bufferSize;
8842 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8843 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8844 if ( stream_.deviceBuffer == NULL ) {
8845 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8851 stream_.device[mode] = device;
8853 // Setup the buffer conversion information structure.
8854 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8856 if ( !stream_.apiHandle ) {
8857 PulseAudioHandle *pah = new PulseAudioHandle;
8859 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8863 stream_.apiHandle = pah;
8864 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8865 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8869 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8872 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8875 pa_buffer_attr buffer_attr;
8876 buffer_attr.fragsize = bufferBytes;
8877 buffer_attr.maxlength = -1;
8879 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8880 if ( !pah->s_rec ) {
8881 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8886 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8887 if ( !pah->s_play ) {
8888 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8896 if ( stream_.mode == UNINITIALIZED )
8897 stream_.mode = mode;
8898 else if ( stream_.mode == mode )
8901 stream_.mode = DUPLEX;
8903 if ( !stream_.callbackInfo.isRunning ) {
8904 stream_.callbackInfo.object = this;
8906 stream_.state = STREAM_STOPPED;
8907 // Set the thread attributes for joinable and realtime scheduling
8908 // priority (optional). The higher priority will only take affect
8909 // if the program is run as root or suid. Note, under Linux
8910 // processes with CAP_SYS_NICE privilege, a user can change
8911 // scheduling policy and priority (thus need not be root). See
8912 // POSIX "capabilities".
8913 pthread_attr_t attr;
8914 pthread_attr_init( &attr );
8915 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8916 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8917 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8918 stream_.callbackInfo.doRealtime = true;
8919 struct sched_param param;
8920 int priority = options->priority;
8921 int min = sched_get_priority_min( SCHED_RR );
8922 int max = sched_get_priority_max( SCHED_RR );
8923 if ( priority < min ) priority = min;
8924 else if ( priority > max ) priority = max;
8925 param.sched_priority = priority;
8927 // Set the policy BEFORE the priority. Otherwise it fails.
8928 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8929 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8930 // This is definitely required. Otherwise it fails.
8931 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8932 pthread_attr_setschedparam(&attr, ¶m);
8935 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8937 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8940 stream_.callbackInfo.isRunning = true;
8941 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8942 pthread_attr_destroy(&attr);
8944 // Failed. Try instead with default attributes.
8945 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8947 stream_.callbackInfo.isRunning = false;
8948 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8957 if ( pah && stream_.callbackInfo.isRunning ) {
8958 pthread_cond_destroy( &pah->runnable_cv );
8960 stream_.apiHandle = 0;
8963 for ( int i=0; i<2; i++ ) {
8964 if ( stream_.userBuffer[i] ) {
8965 free( stream_.userBuffer[i] );
8966 stream_.userBuffer[i] = 0;
8970 if ( stream_.deviceBuffer ) {
8971 free( stream_.deviceBuffer );
8972 stream_.deviceBuffer = 0;
8975 stream_.state = STREAM_CLOSED;
8979 //******************** End of __LINUX_PULSE__ *********************//
8982 #if defined(__LINUX_OSS__)
8985 #include <sys/ioctl.h>
8988 #include <sys/soundcard.h>
8992 static void *ossCallbackHandler(void * ptr);
8994 // A structure to hold various information related to the OSS API
8997 int id[2]; // device ids
// Condition variable used to park the callback thread while the stream
// is stopped (signalled from startStream()/closeStream()).
9000 pthread_cond_t runnable;
// NOTE(review): the struct header and the remaining members (e.g. the
// 'triggered' and 'xrun' flags initialized below) are not visible in this
// extraction; the initializer list here zeroes both device ids and clears
// both xrun flags.
9003 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor. All stream bookkeeping is initialized by the RtApi
// base class, so there is no OSS-specific setup to perform here.
9006 RtApiOss :: RtApiOss()
9008 // Nothing to do here.
// Destructor: if a stream is still open, tear it down via closeStream()
// (which stops the callback thread, closes the device fds and frees buffers).
9011 RtApiOss :: ~RtApiOss()
9013 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Returns the number of OSS audio devices reported by the system, or a
// warning (via error()) if the mixer device cannot be queried. Uses the
// OSS v4 SNDCTL_SYSINFO ioctl on '/dev/mixer'.
9016 unsigned int RtApiOss :: getDeviceCount( void )
9018 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9019 if ( mixerfd == -1 ) {
9020 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9021 error( RtAudioError::WARNING );
9025 oss_sysinfo sysinfo;
// SNDCTL_SYSINFO is only available in OSS >= 4.0, hence the message below.
9026 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9028 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9029 error( RtAudioError::WARNING );
9034 return sysinfo.numaudios;
// Probes a single OSS device (by zero-based index) and fills in an
// RtAudio::DeviceInfo structure: channel counts, natively supported data
// formats, supported sample rates and a preferred rate. Failures are
// reported as warnings (device-count/ID problems as INVALID_USE) and the
// partially-filled info is returned with probed left false on early errors.
9037 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9039 RtAudio::DeviceInfo info;
9040 info.probed = false;
9042 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9043 if ( mixerfd == -1 ) {
9044 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9045 error( RtAudioError::WARNING );
9049 oss_sysinfo sysinfo;
9050 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9051 if ( result == -1 ) {
9053 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9054 error( RtAudioError::WARNING );
9058 unsigned nDevices = sysinfo.numaudios;
9059 if ( nDevices == 0 ) {
9061 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9062 error( RtAudioError::INVALID_USE );
9066 if ( device >= nDevices ) {
9068 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9069 error( RtAudioError::INVALID_USE );
9073 oss_audioinfo ainfo;
9075 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9077 if ( result == -1 ) {
9078 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9079 errorText_ = errorStream_.str();
9080 error( RtAudioError::WARNING );
// Derive channel capabilities from the device caps bitmask; duplex channel
// count is the smaller of the input and output counts.
9085 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9086 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9087 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9088 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9089 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9092 // Probe data formats ... do for input
// Map the OSS AFMT_* bits (both endiannesses) onto RtAudio format flags.
9093 unsigned long mask = ainfo.iformats;
9094 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9095 info.nativeFormats |= RTAUDIO_SINT16;
9096 if ( mask & AFMT_S8 )
9097 info.nativeFormats |= RTAUDIO_SINT8;
9098 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9099 info.nativeFormats |= RTAUDIO_SINT32;
9101 if ( mask & AFMT_FLOAT )
9102 info.nativeFormats |= RTAUDIO_FLOAT32;
9104 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9105 info.nativeFormats |= RTAUDIO_SINT24;
9107 // Check that we have at least one supported format
9108 if ( info.nativeFormats == 0 ) {
9109 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9110 errorText_ = errorStream_.str();
9111 error( RtAudioError::WARNING );
9115 // Probe the supported sample rates.
9116 info.sampleRates.clear();
// If the device reports a discrete rate list, intersect it with RtAudio's
// SAMPLE_RATES table; otherwise fall back to the min/max range check below.
9117 if ( ainfo.nrates ) {
9118 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9119 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9120 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9121 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48000 Hz.
9123 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9124 info.preferredSampleRate = SAMPLE_RATES[k];
9132 // Check min and max rate values;
9133 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9134 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9135 info.sampleRates.push_back( SAMPLE_RATES[k] );
9137 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9138 info.preferredSampleRate = SAMPLE_RATES[k];
9143 if ( info.sampleRates.size() == 0 ) {
9144 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9145 errorText_ = errorStream_.str();
9146 error( RtAudioError::WARNING );
9150 info.name = ainfo.name;
// Opens and configures an OSS device for input or output (called once per
// direction; a second call with mode==INPUT on the same device upgrades the
// stream to DUPLEX). The sequence is order-sensitive: open the fd, set
// channels, data format, fragment/buffer size and sample rate via ioctls,
// then allocate conversion buffers and finally spawn the callback thread.
// Returns true on success; on failure sets errorText_ and falls through to
// the shared cleanup code at the bottom (thread/handle/buffer teardown).
9157 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9158 unsigned int firstChannel, unsigned int sampleRate,
9159 RtAudioFormat format, unsigned int *bufferSize,
9160 RtAudio::StreamOptions *options )
9162 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9163 if ( mixerfd == -1 ) {
9164 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9168 oss_sysinfo sysinfo;
9169 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9170 if ( result == -1 ) {
9172 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9176 unsigned nDevices = sysinfo.numaudios;
9177 if ( nDevices == 0 ) {
9178 // This should not happen because a check is made before this function is called.
9180 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9184 if ( device >= nDevices ) {
9185 // This should not happen because a check is made before this function is called.
9187 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9191 oss_audioinfo ainfo;
9193 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9195 if ( result == -1 ) {
9196 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9197 errorText_ = errorStream_.str();
9201 // Check if device supports input or output
9202 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9203 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9204 if ( mode == OUTPUT )
9205 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9207 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9208 errorText_ = errorStream_.str();
9213 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9214 if ( mode == OUTPUT )
9216 else { // mode == INPUT
// Duplex on OSS requires one fd opened O_RDWR: if output was already opened
// on this same device, close it and reopen the fd for both directions.
9217 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9218 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9219 close( handle->id[0] );
9221 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9222 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9223 errorText_ = errorStream_.str();
9226 // Check that the number previously set channels is the same.
9227 if ( stream_.nUserChannels[0] != channels ) {
9228 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9229 errorText_ = errorStream_.str();
9238 // Set exclusive access if specified.
9239 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9241 // Try to open the device.
9243 fd = open( ainfo.devnode, flags, 0 );
9245 if ( errno == EBUSY )
9246 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9248 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9249 errorText_ = errorStream_.str();
9253 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): 'flags | O_RDWR' is always true (bitwise OR, not a mask
// test); left as-is here since this is a documentation-only pass and the
// enclosing duplex logic is not fully visible in this extraction.
9255 if ( flags | O_RDWR ) {
9256 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9257 if ( result == -1) {
9258 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9259 errorText_ = errorStream_.str();
9265 // Check the device channel support.
9266 stream_.nUserChannels[mode] = channels;
9267 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9269 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9270 errorText_ = errorStream_.str();
9274 // Set the number of channels.
9275 int deviceChannels = channels + firstChannel;
9276 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9277 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9279 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9280 errorText_ = errorStream_.str();
9283 stream_.nDeviceChannels[mode] = deviceChannels;
9285 // Get the data format mask
9287 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9288 if ( result == -1 ) {
9290 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9291 errorText_ = errorStream_.str();
9295 // Determine how to set the device format.
// Prefer a native-endian (AFMT_*_NE) match for the user's requested format;
// fall back to the opposite-endian variant with byte swapping enabled.
9296 stream_.userFormat = format;
9297 int deviceFormat = -1;
9298 stream_.doByteSwap[mode] = false;
9299 if ( format == RTAUDIO_SINT8 ) {
9300 if ( mask & AFMT_S8 ) {
9301 deviceFormat = AFMT_S8;
9302 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9305 else if ( format == RTAUDIO_SINT16 ) {
9306 if ( mask & AFMT_S16_NE ) {
9307 deviceFormat = AFMT_S16_NE;
9308 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9310 else if ( mask & AFMT_S16_OE ) {
9311 deviceFormat = AFMT_S16_OE;
9312 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9313 stream_.doByteSwap[mode] = true;
9316 else if ( format == RTAUDIO_SINT24 ) {
9317 if ( mask & AFMT_S24_NE ) {
9318 deviceFormat = AFMT_S24_NE;
9319 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9321 else if ( mask & AFMT_S24_OE ) {
9322 deviceFormat = AFMT_S24_OE;
9323 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9324 stream_.doByteSwap[mode] = true;
9327 else if ( format == RTAUDIO_SINT32 ) {
9328 if ( mask & AFMT_S32_NE ) {
9329 deviceFormat = AFMT_S32_NE;
9330 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9332 else if ( mask & AFMT_S32_OE ) {
9333 deviceFormat = AFMT_S32_OE;
9334 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9335 stream_.doByteSwap[mode] = true;
9339 if ( deviceFormat == -1 ) {
9340 // The user requested format is not natively supported by the device.
// Pick any workable device format (16 > 32 > 24 bit, native endian first);
// the user buffer will be converted to/from it later.
9341 if ( mask & AFMT_S16_NE ) {
9342 deviceFormat = AFMT_S16_NE;
9343 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9345 else if ( mask & AFMT_S32_NE ) {
9346 deviceFormat = AFMT_S32_NE;
9347 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9349 else if ( mask & AFMT_S24_NE ) {
9350 deviceFormat = AFMT_S24_NE;
9351 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9353 else if ( mask & AFMT_S16_OE ) {
9354 deviceFormat = AFMT_S16_OE;
9355 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9356 stream_.doByteSwap[mode] = true;
9358 else if ( mask & AFMT_S32_OE ) {
9359 deviceFormat = AFMT_S32_OE;
9360 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9361 stream_.doByteSwap[mode] = true;
9363 else if ( mask & AFMT_S24_OE ) {
9364 deviceFormat = AFMT_S24_OE;
9365 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9366 stream_.doByteSwap[mode] = true;
9368 else if ( mask & AFMT_S8) {
9369 deviceFormat = AFMT_S8;
9370 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9374 if ( stream_.deviceFormat[mode] == 0 ) {
9375 // This really shouldn't happen ...
9377 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9378 errorText_ = errorStream_.str();
9382 // Set the data format.
9383 int temp = deviceFormat;
9384 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
// The ioctl may silently substitute a different format; treat that as failure.
9385 if ( result == -1 || deviceFormat != temp ) {
9387 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9388 errorText_ = errorStream_.str();
9392 // Attempt to set the buffer size. According to OSS, the minimum
9393 // number of buffers is two. The supposed minimum buffer size is 16
9394 // bytes, so that will be our lower bound. The argument to this
9395 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9396 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9397 // We'll check the actual value used near the end of the setup
9399 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9400 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9402 if ( options ) buffers = options->numberOfBuffers;
9403 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9404 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x): encode buffer size as a power-of-two exponent.
9405 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9406 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9407 if ( result == -1 ) {
9409 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9410 errorText_ = errorStream_.str();
9413 stream_.nBuffers = buffers;
9415 // Save buffer size (in sample frames).
9416 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9417 stream_.bufferSize = *bufferSize;
9419 // Set the sample rate.
9420 int srate = sampleRate;
9421 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9422 if ( result == -1 ) {
9424 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9425 errorText_ = errorStream_.str();
9429 // Verify the sample rate setup worked.
// Allow up to 100 Hz of slack: OSS may report a slightly different rate.
9430 if ( abs( srate - (int)sampleRate ) > 100 ) {
9432 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9433 errorText_ = errorStream_.str();
9436 stream_.sampleRate = sampleRate;
9438 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9439 // We're doing duplex setup here.
9440 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9441 stream_.nDeviceChannels[0] = deviceChannels;
9444 // Set interleaving parameters.
9445 stream_.userInterleaved = true;
9446 stream_.deviceInterleaved[mode] = true;
9447 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9448 stream_.userInterleaved = false;
9450 // Set flags for buffer conversion
// Conversion is needed when format, channel count or interleaving differ
// between the user-facing and device-facing buffers.
9451 stream_.doConvertBuffer[mode] = false;
9452 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9453 stream_.doConvertBuffer[mode] = true;
9454 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9455 stream_.doConvertBuffer[mode] = true;
9456 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9457 stream_.nUserChannels[mode] > 1 )
9458 stream_.doConvertBuffer[mode] = true;
9460 // Allocate the stream handles if necessary and then save.
9461 if ( stream_.apiHandle == 0 ) {
9463 handle = new OssHandle;
9465 catch ( std::bad_alloc& ) {
9466 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9470 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9471 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9475 stream_.apiHandle = (void *) handle;
9478 handle = (OssHandle *) stream_.apiHandle;
9480 handle->id[mode] = fd;
9482 // Allocate necessary internal buffers.
9483 unsigned long bufferBytes;
9484 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9485 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9486 if ( stream_.userBuffer[mode] == NULL ) {
9487 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9491 if ( stream_.doConvertBuffer[mode] ) {
9493 bool makeBuffer = true;
9494 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9495 if ( mode == INPUT ) {
// Reuse the output-side device buffer for duplex if it is already big enough.
9496 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9497 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9498 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9503 bufferBytes *= *bufferSize;
9504 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9505 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9506 if ( stream_.deviceBuffer == NULL ) {
9507 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9513 stream_.device[mode] = device;
9514 stream_.state = STREAM_STOPPED;
9516 // Setup the buffer conversion information structure.
9517 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9519 // Setup thread if necessary.
9520 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9521 // We had already set up an output stream.
9522 stream_.mode = DUPLEX;
// Duplex on the same device shares one fd for both directions.
9523 if ( stream_.device[0] == device ) handle->id[0] = fd;
9526 stream_.mode = mode;
9528 // Setup callback thread.
9529 stream_.callbackInfo.object = (void *) this;
9531 // Set the thread attributes for joinable and realtime scheduling
9532 // priority. The higher priority will only take effect if the
9533 // program is run as root or suid.
9534 pthread_attr_t attr;
9535 pthread_attr_init( &attr );
9536 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9537 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9538 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9539 stream_.callbackInfo.doRealtime = true;
9540 struct sched_param param;
9541 int priority = options->priority;
// Clamp the requested priority into the valid SCHED_RR range.
9542 int min = sched_get_priority_min( SCHED_RR );
9543 int max = sched_get_priority_max( SCHED_RR );
9544 if ( priority < min ) priority = min;
9545 else if ( priority > max ) priority = max;
9546 param.sched_priority = priority;
9548 // Set the policy BEFORE the priority. Otherwise it fails.
9549 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9550 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9551 // This is definitely required. Otherwise it fails.
9552 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9553 pthread_attr_setschedparam(&attr, ¶m);
9556 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9558 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9561 stream_.callbackInfo.isRunning = true;
9562 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9563 pthread_attr_destroy( &attr );
9565 // Failed. Try instead with default attributes.
// Realtime attributes may be rejected without privileges; retry plainly.
9566 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9568 stream_.callbackInfo.isRunning = false;
9569 errorText_ = "RtApiOss::error creating callback thread!";
// Shared error-cleanup path: destroy the condition variable, close any open
// device fds, and free the handle and all allocated buffers.
9579 pthread_cond_destroy( &handle->runnable );
9580 if ( handle->id[0] ) close( handle->id[0] );
9581 if ( handle->id[1] ) close( handle->id[1] );
9583 stream_.apiHandle = 0;
9586 for ( int i=0; i<2; i++ ) {
9587 if ( stream_.userBuffer[i] ) {
9588 free( stream_.userBuffer[i] );
9589 stream_.userBuffer[i] = 0;
9593 if ( stream_.deviceBuffer ) {
9594 free( stream_.deviceBuffer );
9595 stream_.deviceBuffer = 0;
9598 stream_.state = STREAM_CLOSED;
// Closes an open stream: stops the callback thread (waking it first if the
// stream is merely stopped), halts any in-progress device I/O, then releases
// the condition variable, device fds, handle and all buffers. Resets the
// stream to UNINITIALIZED/STREAM_CLOSED. Warns (no throw) if nothing is open.
9602 void RtApiOss :: closeStream()
9604 if ( stream_.state == STREAM_CLOSED ) {
9605 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9606 error( RtAudioError::WARNING );
9610 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clear the run flag, then signal the condition variable so a thread parked
// in callbackEvent() (stopped state) can observe the flag and exit.
9611 stream_.callbackInfo.isRunning = false;
9612 MUTEX_LOCK( &stream_.mutex );
9613 if ( stream_.state == STREAM_STOPPED )
9614 pthread_cond_signal( &handle->runnable );
9615 MUTEX_UNLOCK( &stream_.mutex );
9616 pthread_join( stream_.callbackInfo.thread, NULL );
9618 if ( stream_.state == STREAM_RUNNING ) {
// Halt output and/or input DMA depending on the stream direction.
9619 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9620 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9622 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9623 stream_.state = STREAM_STOPPED;
9627 pthread_cond_destroy( &handle->runnable );
9628 if ( handle->id[0] ) close( handle->id[0] );
9629 if ( handle->id[1] ) close( handle->id[1] );
9631 stream_.apiHandle = 0;
9634 for ( int i=0; i<2; i++ ) {
9635 if ( stream_.userBuffer[i] ) {
9636 free( stream_.userBuffer[i] );
9637 stream_.userBuffer[i] = 0;
9641 if ( stream_.deviceBuffer ) {
9642 free( stream_.deviceBuffer );
9643 stream_.deviceBuffer = 0;
9646 stream_.mode = UNINITIALIZED;
9647 stream_.state = STREAM_CLOSED;
// Starts a stopped stream: marks it RUNNING and wakes the callback thread
// waiting on the handle's condition variable. OSS itself begins playing as
// soon as samples are written, so no device ioctl is needed here.
9650 void RtApiOss :: startStream()
9653 if ( stream_.state == STREAM_RUNNING ) {
9654 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9655 error( RtAudioError::WARNING );
9659 MUTEX_LOCK( &stream_.mutex );
9661 #if defined( HAVE_GETTIMEOFDAY )
// Record the start timestamp used by getStreamTime() interpolation.
9662 gettimeofday( &stream_.lastTickTimestamp, NULL );
9665 stream_.state = STREAM_RUNNING;
9667 // No need to do anything else here ... OSS automatically starts
9668 // when fed samples.
9670 MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread parked in callbackEvent().
9672 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9673 pthread_cond_signal( &handle->runnable );
// Stops a running stream gracefully: for output, flushes the device with
// zero-filled buffers (nBuffers+1 writes) before halting so queued audio
// drains cleanly, then halts input if it uses a separate fd. Raises a
// SYSTEM_ERROR if any device call failed.
9676 void RtApiOss :: stopStream()
9679 if ( stream_.state == STREAM_STOPPED ) {
9680 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9681 error( RtAudioError::WARNING );
9685 MUTEX_LOCK( &stream_.mutex );
9687 // The state might change while waiting on a mutex.
9688 if ( stream_.state == STREAM_STOPPED ) {
9689 MUTEX_UNLOCK( &stream_.mutex );
9694 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9695 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9697 // Flush the output with zeros a few times.
// Choose whichever buffer actually feeds the device (converted or user).
9700 RtAudioFormat format;
9702 if ( stream_.doConvertBuffer[0] ) {
9703 buffer = stream_.deviceBuffer;
9704 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9705 format = stream_.deviceFormat[0];
9708 buffer = stream_.userBuffer[0];
9709 samples = stream_.bufferSize * stream_.nUserChannels[0];
9710 format = stream_.userFormat;
9713 memset( buffer, 0, samples * formatBytes(format) );
9714 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9715 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9716 if ( result == -1 ) {
9717 errorText_ = "RtApiOss::stopStream: audio write error.";
9718 error( RtAudioError::WARNING );
9722 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9723 if ( result == -1 ) {
9724 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9725 errorText_ = errorStream_.str();
// Clear the duplex trigger flag so a restart re-arms input+output together.
9728 handle->triggered = false;
// Input uses a separate halt only when it has its own fd (non-shared duplex).
9731 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9732 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9733 if ( result == -1 ) {
9734 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9735 errorText_ = errorStream_.str();
9741 stream_.state = STREAM_STOPPED;
9742 MUTEX_UNLOCK( &stream_.mutex );
9744 if ( result != -1 ) return;
9745 error( RtAudioError::SYSTEM_ERROR );
// Stops a running stream immediately: halts device I/O via SNDCTL_DSP_HALT
// without draining pending output (contrast with stopStream(), which flushes
// zeros first). Raises a SYSTEM_ERROR if a halt ioctl failed.
9748 void RtApiOss :: abortStream()
9751 if ( stream_.state == STREAM_STOPPED ) {
9752 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9753 error( RtAudioError::WARNING );
9757 MUTEX_LOCK( &stream_.mutex );
9759 // The state might change while waiting on a mutex.
9760 if ( stream_.state == STREAM_STOPPED ) {
9761 MUTEX_UNLOCK( &stream_.mutex );
9766 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9767 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9768 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9769 if ( result == -1 ) {
9770 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9771 errorText_ = errorStream_.str();
// Clear the duplex trigger flag so a restart re-arms input+output together.
9774 handle->triggered = false;
// Input is halted separately only when it has its own fd.
9777 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9778 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9779 if ( result == -1 ) {
9780 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9781 errorText_ = errorStream_.str();
9787 stream_.state = STREAM_STOPPED;
9788 MUTEX_UNLOCK( &stream_.mutex );
9790 if ( result != -1 ) return;
9791 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop (invoked repeatedly by
// ossCallbackHandler): waits while stopped, invokes the user callback with
// xrun status, then performs the output write and/or input read with any
// needed buffer conversion and byte swapping. write()/read() block on the
// device, which paces the loop.
9794 void RtApiOss :: callbackEvent()
9796 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9797 if ( stream_.state == STREAM_STOPPED ) {
// Park on the condition variable until startStream()/closeStream() signals.
9798 MUTEX_LOCK( &stream_.mutex );
9799 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9800 if ( stream_.state != STREAM_RUNNING ) {
9801 MUTEX_UNLOCK( &stream_.mutex );
9804 MUTEX_UNLOCK( &stream_.mutex );
9807 if ( stream_.state == STREAM_CLOSED ) {
9808 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9809 error( RtAudioError::WARNING );
9813 // Invoke user callback to get fresh output data.
9814 int doStopStream = 0;
9815 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9816 double streamTime = getStreamTime();
9817 RtAudioStreamStatus status = 0;
// Report and clear any under/overflow flags recorded by earlier I/O errors.
9818 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9819 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9820 handle->xrun[0] = false;
9822 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9823 status |= RTAUDIO_INPUT_OVERFLOW;
9824 handle->xrun[1] = false;
9826 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9827 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Return value 2 requests an immediate abort (no output drain).
9828 if ( doStopStream == 2 ) {
9829 this->abortStream();
9833 MUTEX_LOCK( &stream_.mutex );
9835 // The state might change while waiting on a mutex.
9836 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9841 RtAudioFormat format;
9843 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9845 // Setup parameters and do buffer conversion if necessary.
9846 if ( stream_.doConvertBuffer[0] ) {
9847 buffer = stream_.deviceBuffer;
9848 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9849 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9850 format = stream_.deviceFormat[0];
9853 buffer = stream_.userBuffer[0];
9854 samples = stream_.bufferSize * stream_.nUserChannels[0];
9855 format = stream_.userFormat;
9858 // Do byte swapping if necessary.
9859 if ( stream_.doByteSwap[0] )
9860 byteSwapBuffer( buffer, samples, format );
// First duplex pass on a shared fd: prime output, then trigger input and
// output simultaneously so the two directions start in sync.
9862 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9864 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9865 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9866 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9867 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9868 handle->triggered = true;
9871 // Write samples to device.
9872 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9874 if ( result == -1 ) {
9875 // We'll assume this is an underrun, though there isn't a
9876 // specific means for determining that.
9877 handle->xrun[0] = true;
9878 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9879 error( RtAudioError::WARNING );
9880 // Continue on to input section.
9884 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9886 // Setup parameters.
9887 if ( stream_.doConvertBuffer[1] ) {
9888 buffer = stream_.deviceBuffer;
9889 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9890 format = stream_.deviceFormat[1];
9893 buffer = stream_.userBuffer[1];
9894 samples = stream_.bufferSize * stream_.nUserChannels[1];
9895 format = stream_.userFormat;
9898 // Read samples from device.
9899 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9901 if ( result == -1 ) {
9902 // We'll assume this is an overrun, though there isn't a
9903 // specific means for determining that.
9904 handle->xrun[1] = true;
9905 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9906 error( RtAudioError::WARNING );
9910 // Do byte swapping if necessary.
9911 if ( stream_.doByteSwap[1] )
9912 byteSwapBuffer( buffer, samples, format );
9914 // Do buffer conversion if necessary.
9915 if ( stream_.doConvertBuffer[1] )
9916 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9920 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time by one buffer; value 1 requests a drained stop.
9922 RtApi::tickStreamTime();
9923 if ( doStopStream == 1 ) this->stopStream();
// Entry point for the OSS callback thread: loops calling callbackEvent()
// until closeStream() clears info->isRunning, then exits. Also reports
// whether the requested realtime (SCHED_RR) scheduling actually took effect.
9926 static void *ossCallbackHandler( void *ptr )
9928 CallbackInfo *info = (CallbackInfo *) ptr;
9929 RtApiOss *object = (RtApiOss *) info->object;
9930 bool *isRunning = &info->isRunning;
9932 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9933 if (info->doRealtime) {
9934 std::cerr << "RtAudio oss: " <<
9935 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9936 "running realtime scheduling" << std::endl;
9940 while ( *isRunning == true ) {
// Allow pending pthread_cancel requests to take effect between iterations.
9941 pthread_testcancel();
9942 object->callbackEvent();
9945 pthread_exit( NULL );
9948 //******************** End of __LINUX_OSS__ *********************//
9952 // *************************************************** //
9954 // Protected common (OS-independent) RtAudio methods.
9956 // *************************************************** //
9958 // This method can be modified to control the behavior of error
9959 // message printing.
9960 void RtApi :: error( RtAudioError::Type type )
9962 errorStream_.str(""); // clear the ostringstream
9964 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9965 if ( errorCallback ) {
9966 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9968 if ( firstErrorOccurred_ )
9971 firstErrorOccurred_ = true;
9972 const std::string errorMessage = errorText_;
9974 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9975 stream_.callbackInfo.isRunning = false; // exit from the thread
9979 errorCallback( type, errorMessage );
9980 firstErrorOccurred_ = false;
9984 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9985 std::cerr << '\n' << errorText_ << "\n\n";
9986 else if ( type != RtAudioError::WARNING )
9987 throw( RtAudioError( errorText_, type ) );
9990 void RtApi :: verifyStream()
9992 if ( stream_.state == STREAM_CLOSED ) {
9993 errorText_ = "RtApi:: a stream is not open!";
9994 error( RtAudioError::INVALID_USE );
9998 void RtApi :: clearStreamInfo()
10000 stream_.mode = UNINITIALIZED;
10001 stream_.state = STREAM_CLOSED;
10002 stream_.sampleRate = 0;
10003 stream_.bufferSize = 0;
10004 stream_.nBuffers = 0;
10005 stream_.userFormat = 0;
10006 stream_.userInterleaved = true;
10007 stream_.streamTime = 0.0;
10008 stream_.apiHandle = 0;
10009 stream_.deviceBuffer = 0;
10010 stream_.callbackInfo.callback = 0;
10011 stream_.callbackInfo.userData = 0;
10012 stream_.callbackInfo.isRunning = false;
10013 stream_.callbackInfo.errorCallback = 0;
10014 for ( int i=0; i<2; i++ ) {
10015 stream_.device[i] = 11111;
10016 stream_.doConvertBuffer[i] = false;
10017 stream_.deviceInterleaved[i] = true;
10018 stream_.doByteSwap[i] = false;
10019 stream_.nUserChannels[i] = 0;
10020 stream_.nDeviceChannels[i] = 0;
10021 stream_.channelOffset[i] = 0;
10022 stream_.deviceFormat[i] = 0;
10023 stream_.latency[i] = 0;
10024 stream_.userBuffer[i] = 0;
10025 stream_.convertInfo[i].channels = 0;
10026 stream_.convertInfo[i].inJump = 0;
10027 stream_.convertInfo[i].outJump = 0;
10028 stream_.convertInfo[i].inFormat = 0;
10029 stream_.convertInfo[i].outFormat = 0;
10030 stream_.convertInfo[i].inOffset.clear();
10031 stream_.convertInfo[i].outOffset.clear();
10035 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10037 if ( format == RTAUDIO_SINT16 )
10039 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10041 else if ( format == RTAUDIO_FLOAT64 )
10043 else if ( format == RTAUDIO_SINT24 )
10045 else if ( format == RTAUDIO_SINT8 )
10048 errorText_ = "RtApi::formatBytes: undefined format.";
10049 error( RtAudioError::WARNING );
10054 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10056 if ( mode == INPUT ) { // convert device to user buffer
10057 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10058 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10059 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10060 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10062 else { // convert user to device buffer
10063 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10064 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10065 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10066 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10069 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10070 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10072 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10074 // Set up the interleave/deinterleave offsets.
10075 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10076 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10077 ( mode == INPUT && stream_.userInterleaved ) ) {
10078 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10079 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10080 stream_.convertInfo[mode].outOffset.push_back( k );
10081 stream_.convertInfo[mode].inJump = 1;
10085 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10086 stream_.convertInfo[mode].inOffset.push_back( k );
10087 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10088 stream_.convertInfo[mode].outJump = 1;
10092 else { // no (de)interleaving
10093 if ( stream_.userInterleaved ) {
10094 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10095 stream_.convertInfo[mode].inOffset.push_back( k );
10096 stream_.convertInfo[mode].outOffset.push_back( k );
10100 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10101 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10102 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10103 stream_.convertInfo[mode].inJump = 1;
10104 stream_.convertInfo[mode].outJump = 1;
10109 // Add channel offset.
10110 if ( firstChannel > 0 ) {
10111 if ( stream_.deviceInterleaved[mode] ) {
10112 if ( mode == OUTPUT ) {
10113 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10114 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10117 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10118 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10122 if ( mode == OUTPUT ) {
10123 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10124 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10127 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10128 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10134 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10136 // This function does format conversion, input/output channel compensation, and
10137 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10138 // the lower three bytes of a 32-bit integer.
10140 // Clear our device buffer when in/out duplex device channels are different
10141 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10142 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10143 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10146 if (info.outFormat == RTAUDIO_FLOAT64) {
10148 Float64 *out = (Float64 *)outBuffer;
10150 if (info.inFormat == RTAUDIO_SINT8) {
10151 signed char *in = (signed char *)inBuffer;
10152 scale = 1.0 / 127.5;
10153 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10154 for (j=0; j<info.channels; j++) {
10155 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10156 out[info.outOffset[j]] += 0.5;
10157 out[info.outOffset[j]] *= scale;
10160 out += info.outJump;
10163 else if (info.inFormat == RTAUDIO_SINT16) {
10164 Int16 *in = (Int16 *)inBuffer;
10165 scale = 1.0 / 32767.5;
10166 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10167 for (j=0; j<info.channels; j++) {
10168 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10169 out[info.outOffset[j]] += 0.5;
10170 out[info.outOffset[j]] *= scale;
10173 out += info.outJump;
10176 else if (info.inFormat == RTAUDIO_SINT24) {
10177 Int24 *in = (Int24 *)inBuffer;
10178 scale = 1.0 / 8388607.5;
10179 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10180 for (j=0; j<info.channels; j++) {
10181 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10182 out[info.outOffset[j]] += 0.5;
10183 out[info.outOffset[j]] *= scale;
10186 out += info.outJump;
10189 else if (info.inFormat == RTAUDIO_SINT32) {
10190 Int32 *in = (Int32 *)inBuffer;
10191 scale = 1.0 / 2147483647.5;
10192 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10193 for (j=0; j<info.channels; j++) {
10194 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10195 out[info.outOffset[j]] += 0.5;
10196 out[info.outOffset[j]] *= scale;
10199 out += info.outJump;
10202 else if (info.inFormat == RTAUDIO_FLOAT32) {
10203 Float32 *in = (Float32 *)inBuffer;
10204 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10205 for (j=0; j<info.channels; j++) {
10206 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10209 out += info.outJump;
10212 else if (info.inFormat == RTAUDIO_FLOAT64) {
10213 // Channel compensation and/or (de)interleaving only.
10214 Float64 *in = (Float64 *)inBuffer;
10215 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10216 for (j=0; j<info.channels; j++) {
10217 out[info.outOffset[j]] = in[info.inOffset[j]];
10220 out += info.outJump;
10224 else if (info.outFormat == RTAUDIO_FLOAT32) {
10226 Float32 *out = (Float32 *)outBuffer;
10228 if (info.inFormat == RTAUDIO_SINT8) {
10229 signed char *in = (signed char *)inBuffer;
10230 scale = (Float32) ( 1.0 / 127.5 );
10231 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10232 for (j=0; j<info.channels; j++) {
10233 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10234 out[info.outOffset[j]] += 0.5;
10235 out[info.outOffset[j]] *= scale;
10238 out += info.outJump;
10241 else if (info.inFormat == RTAUDIO_SINT16) {
10242 Int16 *in = (Int16 *)inBuffer;
10243 scale = (Float32) ( 1.0 / 32767.5 );
10244 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10245 for (j=0; j<info.channels; j++) {
10246 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10247 out[info.outOffset[j]] += 0.5;
10248 out[info.outOffset[j]] *= scale;
10251 out += info.outJump;
10254 else if (info.inFormat == RTAUDIO_SINT24) {
10255 Int24 *in = (Int24 *)inBuffer;
10256 scale = (Float32) ( 1.0 / 8388607.5 );
10257 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10258 for (j=0; j<info.channels; j++) {
10259 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10260 out[info.outOffset[j]] += 0.5;
10261 out[info.outOffset[j]] *= scale;
10264 out += info.outJump;
10267 else if (info.inFormat == RTAUDIO_SINT32) {
10268 Int32 *in = (Int32 *)inBuffer;
10269 scale = (Float32) ( 1.0 / 2147483647.5 );
10270 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10271 for (j=0; j<info.channels; j++) {
10272 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10273 out[info.outOffset[j]] += 0.5;
10274 out[info.outOffset[j]] *= scale;
10277 out += info.outJump;
10280 else if (info.inFormat == RTAUDIO_FLOAT32) {
10281 // Channel compensation and/or (de)interleaving only.
10282 Float32 *in = (Float32 *)inBuffer;
10283 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10284 for (j=0; j<info.channels; j++) {
10285 out[info.outOffset[j]] = in[info.inOffset[j]];
10288 out += info.outJump;
10291 else if (info.inFormat == RTAUDIO_FLOAT64) {
10292 Float64 *in = (Float64 *)inBuffer;
10293 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10294 for (j=0; j<info.channels; j++) {
10295 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10298 out += info.outJump;
10302 else if (info.outFormat == RTAUDIO_SINT32) {
10303 Int32 *out = (Int32 *)outBuffer;
10304 if (info.inFormat == RTAUDIO_SINT8) {
10305 signed char *in = (signed char *)inBuffer;
10306 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10307 for (j=0; j<info.channels; j++) {
10308 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10309 out[info.outOffset[j]] <<= 24;
10312 out += info.outJump;
10315 else if (info.inFormat == RTAUDIO_SINT16) {
10316 Int16 *in = (Int16 *)inBuffer;
10317 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10318 for (j=0; j<info.channels; j++) {
10319 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10320 out[info.outOffset[j]] <<= 16;
10323 out += info.outJump;
10326 else if (info.inFormat == RTAUDIO_SINT24) {
10327 Int24 *in = (Int24 *)inBuffer;
10328 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10329 for (j=0; j<info.channels; j++) {
10330 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10331 out[info.outOffset[j]] <<= 8;
10334 out += info.outJump;
10337 else if (info.inFormat == RTAUDIO_SINT32) {
10338 // Channel compensation and/or (de)interleaving only.
10339 Int32 *in = (Int32 *)inBuffer;
10340 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10341 for (j=0; j<info.channels; j++) {
10342 out[info.outOffset[j]] = in[info.inOffset[j]];
10345 out += info.outJump;
10348 else if (info.inFormat == RTAUDIO_FLOAT32) {
10349 Float32 *in = (Float32 *)inBuffer;
10350 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10351 for (j=0; j<info.channels; j++) {
10352 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10355 out += info.outJump;
10358 else if (info.inFormat == RTAUDIO_FLOAT64) {
10359 Float64 *in = (Float64 *)inBuffer;
10360 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10361 for (j=0; j<info.channels; j++) {
10362 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10365 out += info.outJump;
10369 else if (info.outFormat == RTAUDIO_SINT24) {
10370 Int24 *out = (Int24 *)outBuffer;
10371 if (info.inFormat == RTAUDIO_SINT8) {
10372 signed char *in = (signed char *)inBuffer;
10373 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10374 for (j=0; j<info.channels; j++) {
10375 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10376 //out[info.outOffset[j]] <<= 16;
10379 out += info.outJump;
10382 else if (info.inFormat == RTAUDIO_SINT16) {
10383 Int16 *in = (Int16 *)inBuffer;
10384 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10385 for (j=0; j<info.channels; j++) {
10386 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10387 //out[info.outOffset[j]] <<= 8;
10390 out += info.outJump;
10393 else if (info.inFormat == RTAUDIO_SINT24) {
10394 // Channel compensation and/or (de)interleaving only.
10395 Int24 *in = (Int24 *)inBuffer;
10396 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10397 for (j=0; j<info.channels; j++) {
10398 out[info.outOffset[j]] = in[info.inOffset[j]];
10401 out += info.outJump;
10404 else if (info.inFormat == RTAUDIO_SINT32) {
10405 Int32 *in = (Int32 *)inBuffer;
10406 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10407 for (j=0; j<info.channels; j++) {
10408 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10409 //out[info.outOffset[j]] >>= 8;
10412 out += info.outJump;
10415 else if (info.inFormat == RTAUDIO_FLOAT32) {
10416 Float32 *in = (Float32 *)inBuffer;
10417 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10418 for (j=0; j<info.channels; j++) {
10419 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10422 out += info.outJump;
10425 else if (info.inFormat == RTAUDIO_FLOAT64) {
10426 Float64 *in = (Float64 *)inBuffer;
10427 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10428 for (j=0; j<info.channels; j++) {
10429 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10432 out += info.outJump;
10436 else if (info.outFormat == RTAUDIO_SINT16) {
10437 Int16 *out = (Int16 *)outBuffer;
10438 if (info.inFormat == RTAUDIO_SINT8) {
10439 signed char *in = (signed char *)inBuffer;
10440 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10441 for (j=0; j<info.channels; j++) {
10442 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10443 out[info.outOffset[j]] <<= 8;
10446 out += info.outJump;
10449 else if (info.inFormat == RTAUDIO_SINT16) {
10450 // Channel compensation and/or (de)interleaving only.
10451 Int16 *in = (Int16 *)inBuffer;
10452 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10453 for (j=0; j<info.channels; j++) {
10454 out[info.outOffset[j]] = in[info.inOffset[j]];
10457 out += info.outJump;
10460 else if (info.inFormat == RTAUDIO_SINT24) {
10461 Int24 *in = (Int24 *)inBuffer;
10462 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10463 for (j=0; j<info.channels; j++) {
10464 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10467 out += info.outJump;
10470 else if (info.inFormat == RTAUDIO_SINT32) {
10471 Int32 *in = (Int32 *)inBuffer;
10472 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10473 for (j=0; j<info.channels; j++) {
10474 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10477 out += info.outJump;
10480 else if (info.inFormat == RTAUDIO_FLOAT32) {
10481 Float32 *in = (Float32 *)inBuffer;
10482 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10483 for (j=0; j<info.channels; j++) {
10484 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10487 out += info.outJump;
10490 else if (info.inFormat == RTAUDIO_FLOAT64) {
10491 Float64 *in = (Float64 *)inBuffer;
10492 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10493 for (j=0; j<info.channels; j++) {
10494 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10497 out += info.outJump;
10501 else if (info.outFormat == RTAUDIO_SINT8) {
10502 signed char *out = (signed char *)outBuffer;
10503 if (info.inFormat == RTAUDIO_SINT8) {
10504 // Channel compensation and/or (de)interleaving only.
10505 signed char *in = (signed char *)inBuffer;
10506 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10507 for (j=0; j<info.channels; j++) {
10508 out[info.outOffset[j]] = in[info.inOffset[j]];
10511 out += info.outJump;
10514 if (info.inFormat == RTAUDIO_SINT16) {
10515 Int16 *in = (Int16 *)inBuffer;
10516 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10517 for (j=0; j<info.channels; j++) {
10518 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10521 out += info.outJump;
10524 else if (info.inFormat == RTAUDIO_SINT24) {
10525 Int24 *in = (Int24 *)inBuffer;
10526 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10527 for (j=0; j<info.channels; j++) {
10528 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10531 out += info.outJump;
10534 else if (info.inFormat == RTAUDIO_SINT32) {
10535 Int32 *in = (Int32 *)inBuffer;
10536 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10537 for (j=0; j<info.channels; j++) {
10538 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10541 out += info.outJump;
10544 else if (info.inFormat == RTAUDIO_FLOAT32) {
10545 Float32 *in = (Float32 *)inBuffer;
10546 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10547 for (j=0; j<info.channels; j++) {
10548 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10551 out += info.outJump;
10554 else if (info.inFormat == RTAUDIO_FLOAT64) {
10555 Float64 *in = (Float64 *)inBuffer;
10556 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10557 for (j=0; j<info.channels; j++) {
10558 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10561 out += info.outJump;
10567 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10568 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10569 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10571 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10577 if ( format == RTAUDIO_SINT16 ) {
10578 for ( unsigned int i=0; i<samples; i++ ) {
10579 // Swap 1st and 2nd bytes.
10584 // Increment 2 bytes.
10588 else if ( format == RTAUDIO_SINT32 ||
10589 format == RTAUDIO_FLOAT32 ) {
10590 for ( unsigned int i=0; i<samples; i++ ) {
10591 // Swap 1st and 4th bytes.
10596 // Swap 2nd and 3rd bytes.
10602 // Increment 3 more bytes.
10606 else if ( format == RTAUDIO_SINT24 ) {
10607 for ( unsigned int i=0; i<samples; i++ ) {
10608 // Swap 1st and 3rd bytes.
10613 // Increment 2 more bytes.
10617 else if ( format == RTAUDIO_FLOAT64 ) {
10618 for ( unsigned int i=0; i<samples; i++ ) {
10619 // Swap 1st and 8th bytes
10624 // Swap 2nd and 7th bytes
10630 // Swap 3rd and 6th bytes
10636 // Swap 4th and 5th bytes
10642 // Increment 5 more bytes.
10648 // Indentation settings for Vim and Emacs
10650 // Local Variables:
10651 // c-basic-offset: 2
10652 // indent-tabs-mode: nil
10655 // vim: et sts=2 sw=2