1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio GitHub site: https://github.com/thestk/rtaudio
11 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
13 RtAudio: realtime audio i/o C++ classes
14 Copyright (c) 2001-2019 Gary P. Scavone
16 Permission is hereby granted, free of charge, to any person
17 obtaining a copy of this software and associated documentation files
18 (the "Software"), to deal in the Software without restriction,
19 including without limitation the rights to use, copy, modify, merge,
20 publish, distribute, sublicense, and/or sell copies of the Software,
21 and to permit persons to whom the Software is furnished to do so,
22 subject to the following conditions:
24 The above copyright notice and this permission notice shall be
25 included in all copies or substantial portions of the Software.
27 Any person wishing to distribute modifications to the Software is
28 asked to send the modifications to the original developer so that
29 they can be incorporated into the canonical version. This is,
30 however, not a binding provision of this license.
32 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
35 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
36 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
37 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 /************************************************************************/
42 // RtAudio: Version 5.1.0
52 // Static variable definitions.
// MAX_SAMPLE_RATES must equal the number of entries in SAMPLE_RATES[]
// (14 rates, 4 kHz .. 192 kHz).  The table is scanned by the API
// backends when probing device sample-rate support.
// NOTE(review): the closing "};" of the array is elided in this listing.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
59 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
// Windows builds: map the portable MUTEX_* macros onto the Win32
// critical-section API.
60 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
61 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
62 #define MUTEX_LOCK(A) EnterCriticalSection(A)
63 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a null-terminated narrow-character string to a std::string.
// Returns an empty string when 'text' is NULL; the unguarded
// std::string(text) construction would otherwise be undefined behavior.
static std::string convertCharPointerToStdString(const char *text)
{
  if ( !text ) return std::string();
  return std::string(text);
}
72 static std::string convertCharPointerToStdString(const wchar_t *text)
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// POSIX builds: map the portable MUTEX_* macros onto pthread mutexes.
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// No audio API compiled in (the "#else" branch, elided here): define the
// macros as harmless expressions so the file still compiles.
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
102 // Define API names and display names.
103 // Must be in same order as API enum.
// Each row pairs a short machine identifier (used by getApiName /
// getCompiledApiByName) with a human-readable display name.  Several
// rows (e.g. jack, alsa, asio) are elided in this listing.
105 const char* rtaudio_api_names[][2] = {
106 { "unspecified" , "Unknown" },
108 { "pulse" , "Pulse" },
109 { "oss" , "OpenSoundSystem" },
111 { "core" , "CoreAudio" },
112 { "wasapi" , "WASAPI" },
114 { "ds" , "DirectSound" },
115 { "dummy" , "Dummy" },
// Row count is derived with sizeof so edits to the table update it
// automatically; checked against RtAudio::NUM_APIS below.
117 const unsigned int rtaudio_num_api_names =
118 sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
120 // The order here will control the order of RtAudio's API search in
// the constructor: the first compiled API with at least one device wins.
// The array is terminated by the UNSPECIFIED sentinel, which the count
// below deliberately excludes (the "-1").
122 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
123 #if defined(__UNIX_JACK__)
126 #if defined(__LINUX_PULSE__)
127 RtAudio::LINUX_PULSE,
129 #if defined(__LINUX_ALSA__)
132 #if defined(__LINUX_OSS__)
135 #if defined(__WINDOWS_ASIO__)
136 RtAudio::WINDOWS_ASIO,
138 #if defined(__WINDOWS_WASAPI__)
139 RtAudio::WINDOWS_WASAPI,
141 #if defined(__WINDOWS_DS__)
144 #if defined(__MACOSX_CORE__)
145 RtAudio::MACOSX_CORE,
147 #if defined(__RTAUDIO_DUMMY__)
148 RtAudio::RTAUDIO_DUMMY,
150 RtAudio::UNSPECIFIED,
152 extern "C" const unsigned int rtaudio_num_compiled_apis =
153 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
156 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
157 // If the build breaks here, check that they match.
// Pre-C++11 static_assert emulation: only the <true> specialization has
// a public constructor, so instantiating StaticAssert<false> (i.e. when
// the table and the enum disagree) fails to compile.
158 template<bool b> class StaticAssert { private: StaticAssert() {} };
159 template<> class StaticAssert<true>{ public: StaticAssert() {} };
160 class StaticAssertions { StaticAssertions() {
161 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
164 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
166 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
167 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
170 std::string RtAudio :: getApiName( RtAudio::Api api )
172 if (api < 0 || api >= RtAudio::NUM_APIS)
174 return rtaudio_api_names[api][0];
177 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
179 if (api < 0 || api >= RtAudio::NUM_APIS)
181 return rtaudio_api_names[api][1];
184 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
187 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
188 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
189 return rtaudio_compiled_apis[i];
190 return RtAudio::UNSPECIFIED;
193 void RtAudio :: openRtApi( RtAudio::Api api )
// Instantiate the RtApi subclass corresponding to 'api'.  Only APIs
// compiled into this build are candidates; if none matches, rtapi_ is
// left unset for the caller to detect.  (The matching "#endif" lines
// are elided in this listing.)
199 #if defined(__UNIX_JACK__)
200 if ( api == UNIX_JACK )
201 rtapi_ = new RtApiJack();
203 #if defined(__LINUX_ALSA__)
204 if ( api == LINUX_ALSA )
205 rtapi_ = new RtApiAlsa();
207 #if defined(__LINUX_PULSE__)
208 if ( api == LINUX_PULSE )
209 rtapi_ = new RtApiPulse();
211 #if defined(__LINUX_OSS__)
212 if ( api == LINUX_OSS )
213 rtapi_ = new RtApiOss();
215 #if defined(__WINDOWS_ASIO__)
216 if ( api == WINDOWS_ASIO )
217 rtapi_ = new RtApiAsio();
219 #if defined(__WINDOWS_WASAPI__)
220 if ( api == WINDOWS_WASAPI )
221 rtapi_ = new RtApiWasapi();
223 #if defined(__WINDOWS_DS__)
224 if ( api == WINDOWS_DS )
225 rtapi_ = new RtApiDs();
227 #if defined(__MACOSX_CORE__)
228 if ( api == MACOSX_CORE )
229 rtapi_ = new RtApiCore();
231 #if defined(__RTAUDIO_DUMMY__)
232 if ( api == RTAUDIO_DUMMY )
233 rtapi_ = new RtApiDummy();
237 RtAudio :: RtAudio( RtAudio::Api api )
// Construct an RtAudio instance.  If a specific API is requested, try
// it first; otherwise (or if that request cannot be honored) probe each
// compiled API in search order and keep the first one that reports at
// least one device.
241 if ( api != UNSPECIFIED ) {
242 // Attempt to open the specified API.
244 if ( rtapi_ ) return;
246 // No compiled support for specified API value. Issue a debug
247 // warning and continue as if no API was specified.
248 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
251 // Iterate through the compiled APIs and return as soon as we find
252 // one with at least one device or we reach the end of the list.
253 std::vector< RtAudio::Api > apis;
254 getCompiledApi( apis );
255 for ( unsigned int i=0; i<apis.size(); i++ ) {
256 openRtApi( apis[i] );
257 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
260 if ( rtapi_ ) return;
262 // It should not be possible to get here because the preprocessor
263 // definition __RTAUDIO_DUMMY__ is automatically defined if no
264 // API-specific definitions are passed to the compiler. But just in
265 // case something weird happens, we'll throw an error.
266 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
267 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor.  Body not visible in this excerpt; presumably releases
// the owned rtapi_ backend instance — confirm against the full source.
270 RtAudio :: ~RtAudio()
// Thin forwarding wrapper: delegates stream creation to the selected
// API backend (rtapi_) with all arguments passed through unchanged.
276 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
277 RtAudio::StreamParameters *inputParameters,
278 RtAudioFormat format, unsigned int sampleRate,
279 unsigned int *bufferFrames,
280 RtAudioCallback callback, void *userData,
281 RtAudio::StreamOptions *options,
282 RtAudioErrorCallback errorCallback )
284 return rtapi_->openStream( outputParameters, inputParameters, format,
285 sampleRate, bufferFrames, callback,
286 userData, options, errorCallback );
289 // *************************************************** //
291 // Public RtApi definitions (see end of file for
292 // private or protected utility functions).
294 // *************************************************** //
// NOTE(review): the RtApi constructor/destructor signatures are elided
// in this listing.  The statements below initialize the stream state,
// user buffers and mutex (constructor) and destroy the mutex
// (destructor).
298 stream_.state = STREAM_CLOSED;
299 stream_.mode = UNINITIALIZED;
300 stream_.apiHandle = 0;
301 stream_.userBuffer[0] = 0;
302 stream_.userBuffer[1] = 0;
303 MUTEX_INITIALIZE( &stream_.mutex );
304 showWarnings_ = true;
305 firstErrorOccurred_ = false;
// Destructor counterpart: release the stream mutex created above.
310 MUTEX_DESTROY( &stream_.mutex );
// Validate all stream parameters, then ask the backend to open the
// requested output and/or input sub-streams via probeDeviceOpen().
// Each error path sets errorText_ and reports through error(); the
// early "return;" statements after each error() call are elided in
// this listing.
313 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
314 RtAudio::StreamParameters *iParams,
315 RtAudioFormat format, unsigned int sampleRate,
316 unsigned int *bufferFrames,
317 RtAudioCallback callback, void *userData,
318 RtAudio::StreamOptions *options,
319 RtAudioErrorCallback errorCallback )
321 if ( stream_.state != STREAM_CLOSED ) {
322 errorText_ = "RtApi::openStream: a stream is already open!";
323 error( RtAudioError::INVALID_USE );
327 // Clear stream information potentially left from a previously open stream.
330 if ( oParams && oParams->nChannels < 1 ) {
331 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
332 error( RtAudioError::INVALID_USE );
336 if ( iParams && iParams->nChannels < 1 ) {
337 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
338 error( RtAudioError::INVALID_USE );
342 if ( oParams == NULL && iParams == NULL ) {
343 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
344 error( RtAudioError::INVALID_USE );
348 if ( formatBytes(format) == 0 ) {
349 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
350 error( RtAudioError::INVALID_USE );
// Validate device indices against the backend's device count.
354 unsigned int nDevices = getDeviceCount();
355 unsigned int oChannels = 0;
357 oChannels = oParams->nChannels;
358 if ( oParams->deviceId >= nDevices ) {
359 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
360 error( RtAudioError::INVALID_USE );
365 unsigned int iChannels = 0;
367 iChannels = iParams->nChannels;
368 if ( iParams->deviceId >= nDevices ) {
369 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
370 error( RtAudioError::INVALID_USE );
// Open the output side first, then the input side.
377 if ( oChannels > 0 ) {
379 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
380 sampleRate, format, bufferFrames, options );
381 if ( result == false ) {
382 error( RtAudioError::SYSTEM_ERROR );
387 if ( iChannels > 0 ) {
389 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
390 sampleRate, format, bufferFrames, options );
391 if ( result == false ) {
// If the input probe fails after the output was opened, close the
// partially opened stream so no resources are leaked.
392 if ( oChannels > 0 ) closeStream();
393 error( RtAudioError::SYSTEM_ERROR );
// Record the user callback and mark the stream ready (stopped).
398 stream_.callbackInfo.callback = (void *) callback;
399 stream_.callbackInfo.userData = userData;
400 stream_.callbackInfo.errorCallback = (void *) errorCallback;
402 if ( options ) options->numberOfBuffers = stream_.nBuffers;
403 stream_.state = STREAM_STOPPED;
// Base-class fallbacks for the default-device queries.  The bodies are
// elided in this listing; presumably each simply returns device index 0
// — confirm against the full source.
406 unsigned int RtApi :: getDefaultInputDevice( void )
408 // Should be implemented in subclasses if possible.
412 unsigned int RtApi :: getDefaultOutputDevice( void )
414 // Should be implemented in subclasses if possible.
// Base-class placeholder; every concrete backend overrides this to tear
// down its stream resources.  Body elided in this listing.
418 void RtApi :: closeStream( void )
420 // MUST be implemented in subclasses!
// Base-class placeholder for the per-backend device-open routine called
// by openStream().  Parameter names are commented out because the base
// version ignores them; the return statement is elided in this listing.
424 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
425 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
426 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
427 RtAudio::StreamOptions * /*options*/ )
429 // MUST be implemented in subclasses!
433 void RtApi :: tickStreamTime( void )
435 // Subclasses that do not provide their own implementation of
436 // getStreamTime should call this function once per buffer I/O to
437 // provide basic stream time support.
// Advance the stream clock by the duration of one buffer, and stamp
// the wall-clock time so getStreamTime() can interpolate between ticks.
439 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
441 #if defined( HAVE_GETTIMEOFDAY )
442 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Report the stream's total latency in frames: the output latency plus,
// for duplex streams, the input latency.  (The final return statement
// is elided in this listing.)
446 long RtApi :: getStreamLatency( void )
450 long totalLatency = 0;
451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
452 totalLatency = stream_.latency[0];
453 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
454 totalLatency += stream_.latency[1];
// Return the stream time in seconds.  When gettimeofday() is available
// and the stream is running, interpolate past the last tick using the
// wall clock; otherwise return the coarse per-buffer time.
// (The declarations of the local 'now' and 'then' timevals are elided
// in this listing.)
459 double RtApi :: getStreamTime( void )
463 #if defined( HAVE_GETTIMEOFDAY )
464 // Return a very accurate estimate of the stream time by
465 // adding in the elapsed time since the last tick.
469 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
470 return stream_.streamTime;
472 gettimeofday( &now, NULL );
473 then = stream_.lastTickTimestamp;
474 return stream_.streamTime +
475 ((now.tv_sec + 0.000001 * now.tv_usec) -
476 (then.tv_sec + 0.000001 * then.tv_usec));
478 return stream_.streamTime;
// Reset the stream time to a caller-supplied value and re-stamp the
// tick timestamp.  NOTE(review): a guard rejecting negative times
// appears to be elided from this listing — confirm against full source.
482 void RtApi :: setStreamTime( double time )
487 stream_.streamTime = time;
488 #if defined( HAVE_GETTIMEOFDAY )
489 gettimeofday( &stream_.lastTickTimestamp, NULL );
// Accessor for the sample rate of the currently open stream.
493 unsigned int RtApi :: getStreamSampleRate( void )
497 return stream_.sampleRate;
501 // *************************************************** //
503 // OS/API-specific methods.
505 // *************************************************** //
507 #if defined(__MACOSX_CORE__)
509 // The OS X CoreAudio API is designed to use a separate callback
510 // procedure for each of its audio devices. A single RtAudio duplex
511 // stream using two different devices is supported here, though it
512 // cannot be guaranteed to always behave correctly because we cannot
513 // synchronize these two callbacks.
515 // A property listener is installed for over/underrun information.
516 // However, no functionality is currently provided to allow property
517 // listeners to trigger user handlers because it is unclear what could
518 // be done if a critical stream parameter (buffer size, sample rate,
519 // device disconnect) notification arrived. The listeners entail
520 // quite a bit of extra code and most likely, a user program wouldn't
521 // be prepared for the result anyway. However, we do provide a flag
522 // to the client callback function to inform of an over/underrun.
524 // A structure to hold various information related to the CoreAudio API
// Index [0] refers to the output/playback side, [1] to input/capture,
// matching the convention used throughout RtApi (latency[], xrun[], ...).
527 AudioDeviceID id[2]; // device ids
528 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
529 AudioDeviceIOProcID procId[2];
531 UInt32 iStream[2]; // device stream index (or first if using multiple)
532 UInt32 nStreams[2]; // number of streams to use
535 pthread_cond_t condition;
536 int drainCounter; // Tracks callback counts when draining
537 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Constructor initializer list (the struct header and "CoreHandle()"
// lines are elided in this listing).
540 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
543 RtApiCore:: RtApiCore()
545 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
546 // This is a largely undocumented but absolutely necessary
547 // requirement starting with OS-X 10.6. If not called, queries and
548 // updates to various audio device properties are not handled
// Passing a NULL run loop tells CoreAudio to deliver property
// notifications on its own internal thread instead of a run loop the
// application would have to pump.
550 CFRunLoopRef theRunLoop = NULL;
551 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
552 kAudioObjectPropertyScopeGlobal,
553 kAudioObjectPropertyElementMaster };
554 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
555 if ( result != noErr ) {
556 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
557 error( RtAudioError::WARNING );
562 RtApiCore :: ~RtApiCore()
564 // The subclass destructor gets called before the base class
565 // destructor, so close an existing stream before deallocating
566 // apiDeviceId memory.
567 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query CoreAudio for the size of the system device list and derive the
// device count from it.  (The 'dataSize' declaration and the error-path
// "return 0;" are elided in this listing.)
570 unsigned int RtApiCore :: getDeviceCount( void )
572 // Find out how many audio devices there are, if any.
574 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
575 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
576 if ( result != noErr ) {
577 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
578 error( RtAudioError::WARNING );
582 return dataSize / sizeof( AudioDeviceID );
// Translate CoreAudio's default-input AudioDeviceID into RtAudio's
// zero-based device index by locating it in the full device list.
// (The 'id' declaration and the error-path returns are elided in this
// listing.)
585 unsigned int RtApiCore :: getDefaultInputDevice( void )
587 unsigned int nDevices = getDeviceCount();
588 if ( nDevices <= 1 ) return 0;
591 UInt32 dataSize = sizeof( AudioDeviceID );
592 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
593 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
594 if ( result != noErr ) {
595 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
596 error( RtAudioError::WARNING );
600 dataSize *= nDevices;
// NOTE(review): runtime-sized array is a compiler extension (VLA), not
// standard C++.
601 AudioDeviceID deviceList[ nDevices ];
602 property.mSelector = kAudioHardwarePropertyDevices;
603 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
604 if ( result != noErr ) {
605 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
606 error( RtAudioError::WARNING );
610 for ( unsigned int i=0; i<nDevices; i++ )
611 if ( id == deviceList[i] ) return i;
613 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
614 error( RtAudioError::WARNING );
// Mirror of getDefaultInputDevice() for the default output device:
// fetch the default AudioDeviceID, then map it to a zero-based index
// within the full device list.  (Local 'id' declaration and error-path
// returns are elided in this listing.)
618 unsigned int RtApiCore :: getDefaultOutputDevice( void )
620 unsigned int nDevices = getDeviceCount();
621 if ( nDevices <= 1 ) return 0;
624 UInt32 dataSize = sizeof( AudioDeviceID );
625 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
626 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
627 if ( result != noErr ) {
628 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
629 error( RtAudioError::WARNING );
633 dataSize = sizeof( AudioDeviceID ) * nDevices;
// NOTE(review): runtime-sized array is a compiler extension (VLA).
634 AudioDeviceID deviceList[ nDevices ];
635 property.mSelector = kAudioHardwarePropertyDevices;
636 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
637 if ( result != noErr ) {
638 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
639 error( RtAudioError::WARNING );
643 for ( unsigned int i=0; i<nDevices; i++ )
644 if ( id == deviceList[i] ) return i;
646 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
647 error( RtAudioError::WARNING );
// Build a DeviceInfo record for CoreAudio device index 'device':
// name (manufacturer + product via CFString), channel counts from the
// output/input stream configurations, supported sample rates, and the
// native format (always float32 for CoreAudio).  Error paths report a
// WARNING and return early (the "return info;" lines are elided in
// this listing, as are the free()/CFRelease() cleanup calls).
651 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
653 RtAudio::DeviceInfo info;
657 unsigned int nDevices = getDeviceCount();
658 if ( nDevices == 0 ) {
659 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
660 error( RtAudioError::INVALID_USE );
664 if ( device >= nDevices ) {
665 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
666 error( RtAudioError::INVALID_USE );
// Map the index to an AudioDeviceID via the full system device list.
670 AudioDeviceID deviceList[ nDevices ];
671 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
672 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
673 kAudioObjectPropertyScopeGlobal,
674 kAudioObjectPropertyElementMaster };
675 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
676 0, NULL, &dataSize, (void *) &deviceList );
677 if ( result != noErr ) {
678 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
679 error( RtAudioError::WARNING );
683 AudioDeviceID id = deviceList[ device ];
685 // Get the device name.
// The name is assembled as "<manufacturer>: <product>".  A buffer of
// length*3+1 bytes is allocated since UTF-8 can use up to 3 bytes per
// UTF-16 code unit here.  (The 'cfname' declaration and the matching
// free()/CFRelease() calls are elided in this listing — verify against
// the full source that the buffers are released.)
688 dataSize = sizeof( CFStringRef );
689 property.mSelector = kAudioObjectPropertyManufacturer;
690 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
691 if ( result != noErr ) {
692 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
693 errorText_ = errorStream_.str();
694 error( RtAudioError::WARNING );
698 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
699 int length = CFStringGetLength(cfname);
700 char *mname = (char *)malloc(length * 3 + 1);
701 #if defined( UNICODE ) || defined( _UNICODE )
702 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
704 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
706 info.name.append( (const char *)mname, strlen(mname) );
707 info.name.append( ": " );
711 property.mSelector = kAudioObjectPropertyName;
712 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
713 if ( result != noErr ) {
714 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
715 errorText_ = errorStream_.str();
716 error( RtAudioError::WARNING );
720 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
721 length = CFStringGetLength(cfname);
722 char *name = (char *)malloc(length * 3 + 1);
723 #if defined( UNICODE ) || defined( _UNICODE )
724 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
726 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
728 info.name.append( (const char *)name, strlen(name) );
// Channel counts: sum mNumberChannels over every stream buffer in the
// device's output, then input, stream configuration.
732 // Get the output stream "configuration".
733 AudioBufferList *bufferList = nil;
734 property.mSelector = kAudioDevicePropertyStreamConfiguration;
735 property.mScope = kAudioDevicePropertyScopeOutput;
736 // property.mElement = kAudioObjectPropertyElementWildcard;
738 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
739 if ( result != noErr || dataSize == 0 ) {
740 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
741 errorText_ = errorStream_.str();
742 error( RtAudioError::WARNING );
746 // Allocate the AudioBufferList.
747 bufferList = (AudioBufferList *) malloc( dataSize );
748 if ( bufferList == NULL ) {
749 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
750 error( RtAudioError::WARNING );
754 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
755 if ( result != noErr || dataSize == 0 ) {
757 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
758 errorText_ = errorStream_.str();
759 error( RtAudioError::WARNING );
763 // Get output channel information.
764 unsigned int i, nStreams = bufferList->mNumberBuffers;
765 for ( i=0; i<nStreams; i++ )
766 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
769 // Get the input stream "configuration".
770 property.mScope = kAudioDevicePropertyScopeInput;
771 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
772 if ( result != noErr || dataSize == 0 ) {
773 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
774 errorText_ = errorStream_.str();
775 error( RtAudioError::WARNING );
779 // Allocate the AudioBufferList.
780 bufferList = (AudioBufferList *) malloc( dataSize );
781 if ( bufferList == NULL ) {
782 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
783 error( RtAudioError::WARNING );
787 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
788 if (result != noErr || dataSize == 0) {
790 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
791 errorText_ = errorStream_.str();
792 error( RtAudioError::WARNING );
796 // Get input channel information.
797 nStreams = bufferList->mNumberBuffers;
798 for ( i=0; i<nStreams; i++ )
799 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
802 // If device opens for both playback and capture, we determine the channels.
803 if ( info.outputChannels > 0 && info.inputChannels > 0 )
804 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
806 // Probe the device sample rates.
807 bool isInput = false;
808 if ( info.outputChannels == 0 ) isInput = true;
810 // Determine the supported sample rates.
811 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
812 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
813 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
814 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
815 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
816 errorText_ = errorStream_.str();
817 error( RtAudioError::WARNING );
821 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
822 AudioValueRange rangeList[ nRanges ];
823 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
824 if ( result != kAudioHardwareNoError ) {
825 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
826 errorText_ = errorStream_.str();
827 error( RtAudioError::WARNING );
831 // The sample rate reporting mechanism is a bit of a mystery. It
832 // seems that it can either return individual rates or a range of
833 // rates. I assume that if the min / max range values are the same,
834 // then that represents a single supported rate and if the min / max
835 // range values are different, the device supports an arbitrary
836 // range of values (though there might be multiple ranges, so we'll
837 // use the most conservative range).
838 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
839 bool haveValueRange = false;
840 info.sampleRates.clear();
841 for ( UInt32 i=0; i<nRanges; i++ ) {
842 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
843 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
844 info.sampleRates.push_back( tmpSr );
// Preferred rate: the highest discrete rate that is <= 48 kHz.
846 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
847 info.preferredSampleRate = tmpSr;
850 haveValueRange = true;
851 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
852 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For continuous ranges, advertise the standard rates from
// SAMPLE_RATES[] that fall inside the intersected [min, max] range.
856 if ( haveValueRange ) {
857 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
858 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
859 info.sampleRates.push_back( SAMPLE_RATES[k] );
861 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
862 info.preferredSampleRate = SAMPLE_RATES[k];
867 // Sort and remove any redundant values
868 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
869 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
871 if ( info.sampleRates.size() == 0 ) {
872 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
873 errorText_ = errorStream_.str();
874 error( RtAudioError::WARNING );
878 // CoreAudio always uses 32-bit floating point data for PCM streams.
879 // Thus, any other "physical" formats supported by the device are of
880 // no interest to the client.
881 info.nativeFormats = RTAUDIO_FLOAT32;
883 if ( info.outputChannels > 0 )
884 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
885 if ( info.inputChannels > 0 )
886 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: recovers the RtApiCore instance from the
// client-data pointer and forwards the buffers to callbackEvent().
// (The final "void* infoPointer" parameter is on an elided line.)
892 static OSStatus callbackHandler( AudioDeviceID inDevice,
893 const AudioTimeStamp* /*inNow*/,
894 const AudioBufferList* inInputData,
895 const AudioTimeStamp* /*inInputTime*/,
896 AudioBufferList* outOutputData,
897 const AudioTimeStamp* /*inOutputTime*/,
900 CallbackInfo *info = (CallbackInfo *) infoPointer;
902 RtApiCore *object = (RtApiCore *) info->object;
// A false return from callbackEvent signals a fatal stream problem.
903 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
904 return kAudioHardwareUnspecifiedError;
906 return kAudioHardwareNoError;
// Property listener for processor-overload notifications: records an
// over/underrun in the CoreHandle flags (xrun[1] for input scope,
// xrun[0] otherwise) so callbackEvent can report it to the client.
// (The "UInt32 nAddresses" parameter is on an elided line.)
909 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
911 const AudioObjectPropertyAddress properties[],
912 void* handlePointer )
914 CoreHandle *handle = (CoreHandle *) handlePointer;
915 for ( UInt32 i=0; i<nAddresses; i++ ) {
916 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
917 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
918 handle->xrun[1] = true;
920 handle->xrun[0] = true;
924 return kAudioHardwareNoError;
// Property listener used while waiting for a sample-rate change to take
// effect: reads the device's current nominal sample rate into the
// Float64 that 'ratePointer' addresses.
927 static OSStatus rateListener( AudioObjectID inDevice,
928 UInt32 /*nAddresses*/,
929 const AudioObjectPropertyAddress /*properties*/[],
932 Float64 *rate = (Float64 *) ratePointer;
933 UInt32 dataSize = sizeof( Float64 );
934 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
935 kAudioObjectPropertyScopeGlobal,
936 kAudioObjectPropertyElementMaster };
937 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
938 return kAudioHardwareNoError;
// Open/configure a CoreAudio device for one direction (OUTPUT or INPUT) of a
// stream: validate the device index, locate the CoreAudio stream(s) and
// channel offset that cover the requested channels, negotiate buffer size,
// sample rate and virtual/physical data formats, allocate the CoreHandle and
// user/device buffers, and install the IOProc callback.
// NOTE(review): this excerpt is missing interleaved lines (error-path
// returns, closing braces, #endif lines), so the control flow shown here is
// incomplete — do not infer early-exit behavior from what is visible.
941 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
942 unsigned int firstChannel, unsigned int sampleRate,
943 RtAudioFormat format, unsigned int *bufferSize,
944 RtAudio::StreamOptions *options )
947 unsigned int nDevices = getDeviceCount();
948 if ( nDevices == 0 ) {
949 // This should not happen because a check is made before this function is called.
950 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
954 if ( device >= nDevices ) {
955 // This should not happen because a check is made before this function is called.
956 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Map the caller's device index to an AudioDeviceID via the system device list.
960 AudioDeviceID deviceList[ nDevices ];
961 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
962 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
963 kAudioObjectPropertyScopeGlobal,
964 kAudioObjectPropertyElementMaster };
965 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
966 0, NULL, &dataSize, (void *) &deviceList );
967 if ( result != noErr ) {
968 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
972 AudioDeviceID id = deviceList[ device ];
974 // Setup for stream mode.
975 bool isInput = false;
976 if ( mode == INPUT ) {
978 property.mScope = kAudioDevicePropertyScopeInput;
981 property.mScope = kAudioDevicePropertyScopeOutput;
983 // Get the stream "configuration".
984 AudioBufferList *bufferList = nil;
986 property.mSelector = kAudioDevicePropertyStreamConfiguration;
987 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
988 if ( result != noErr || dataSize == 0 ) {
989 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
990 errorText_ = errorStream_.str();
994 // Allocate the AudioBufferList.
995 bufferList = (AudioBufferList *) malloc( dataSize );
996 if ( bufferList == NULL ) {
997 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1001 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1002 if (result != noErr || dataSize == 0) {
1004 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1005 errorText_ = errorStream_.str();
1009 // Search for one or more streams that contain the desired number of
1010 // channels. CoreAudio devices can have an arbitrary number of
1011 // streams and each stream can have an arbitrary number of channels.
1012 // For each stream, a single buffer of interleaved samples is
1013 // provided. RtAudio prefers the use of one stream of interleaved
1014 // data or multiple consecutive single-channel streams. However, we
1015 // now support multiple consecutive multi-channel streams of
1016 // interleaved data as well.
1017 UInt32 iStream, offsetCounter = firstChannel;
1018 UInt32 nStreams = bufferList->mNumberBuffers;
1019 bool monoMode = false;
1020 bool foundStream = false;
1022 // First check that the device supports the requested number of
1024 UInt32 deviceChannels = 0;
1025 for ( iStream=0; iStream<nStreams; iStream++ )
1026 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1028 if ( deviceChannels < ( channels + firstChannel ) ) {
1030 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1031 errorText_ = errorStream_.str();
1035 // Look for a single stream meeting our needs.
1036 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1037 for ( iStream=0; iStream<nStreams; iStream++ ) {
1038 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1039 if ( streamChannels >= channels + offsetCounter ) {
1040 firstStream = iStream;
1041 channelOffset = offsetCounter;
1045 if ( streamChannels > offsetCounter ) break;
1046 offsetCounter -= streamChannels;
1049 // If we didn't find a single stream above, then we should be able
1050 // to meet the channel specification with multiple streams.
1051 if ( foundStream == false ) {
1053 offsetCounter = firstChannel;
1054 for ( iStream=0; iStream<nStreams; iStream++ ) {
1055 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1056 if ( streamChannels > offsetCounter ) break;
1057 offsetCounter -= streamChannels;
1060 firstStream = iStream;
1061 channelOffset = offsetCounter;
1062 Int32 channelCounter = channels + offsetCounter - streamChannels;
1064 if ( streamChannels > 1 ) monoMode = false;
1065 while ( channelCounter > 0 ) {
1066 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1067 if ( streamChannels > 1 ) monoMode = false;
1068 channelCounter -= streamChannels;
// Clamp the requested *bufferSize to the device's supported frame-size range;
// RTAUDIO_MINIMIZE_LATENCY overrides it to the device minimum.
1075 // Determine the buffer size.
1076 AudioValueRange bufferRange;
1077 dataSize = sizeof( AudioValueRange );
1078 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1079 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1081 if ( result != noErr ) {
1082 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1083 errorText_ = errorStream_.str();
1087 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1088 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1089 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1091 // Set the buffer size. For multiple streams, I'm assuming we only
1092 // need to make this setting for the master channel.
1093 UInt32 theSize = (UInt32) *bufferSize;
1094 dataSize = sizeof( UInt32 );
1095 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1096 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1098 if ( result != noErr ) {
1099 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1100 errorText_ = errorStream_.str();
1104 // If attempting to setup a duplex stream, the bufferSize parameter
1105 // MUST be the same in both directions!
1106 *bufferSize = theSize;
1107 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1108 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1109 errorText_ = errorStream_.str();
1113 stream_.bufferSize = *bufferSize;
1114 stream_.nBuffers = 1;
1116 // Try to set "hog" mode ... it's not clear to me this is working.
1117 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1119 dataSize = sizeof( hog_pid );
1120 property.mSelector = kAudioDevicePropertyHogMode;
1121 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1122 if ( result != noErr ) {
1123 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1124 errorText_ = errorStream_.str();
1128 if ( hog_pid != getpid() ) {
1130 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1131 if ( result != noErr ) {
1132 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1133 errorText_ = errorStream_.str();
1139 // Check and if necessary, change the sample rate for the device.
1140 Float64 nominalRate;
1141 dataSize = sizeof( Float64 );
1142 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1143 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1144 if ( result != noErr ) {
1145 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1146 errorText_ = errorStream_.str();
1150 // Only change the sample rate if off by more than 1 Hz.
1151 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1153 // Set a property listener for the sample rate change
1154 Float64 reportedRate = 0.0;
1155 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1156 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1157 if ( result != noErr ) {
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1159 errorText_ = errorStream_.str();
1163 nominalRate = (Float64) sampleRate;
1164 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1165 if ( result != noErr ) {
1166 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1167 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1168 errorText_ = errorStream_.str();
// Poll (in 5 ms steps, 5 s total) until the rate listener reports the new
// nominal rate taking effect.
1172 // Now wait until the reported nominal rate is what we just set.
1173 UInt32 microCounter = 0;
1174 while ( reportedRate != nominalRate ) {
1175 microCounter += 5000;
1176 if ( microCounter > 5000000 ) break;
1180 // Remove the property listener.
1181 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1183 if ( microCounter > 5000000 ) {
1184 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1185 errorText_ = errorStream_.str();
1190 // Now set the stream format for all streams. Also, check the
1191 // physical format of the device and change that if necessary.
1192 AudioStreamBasicDescription description;
1193 dataSize = sizeof( AudioStreamBasicDescription );
1194 property.mSelector = kAudioStreamPropertyVirtualFormat;
1195 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1196 if ( result != noErr ) {
1197 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1198 errorText_ = errorStream_.str();
1202 // Set the sample rate and data format id. However, only make the
1203 // change if the sample rate is not within 1.0 of the desired
1204 // rate and the format is not linear pcm.
1205 bool updateFormat = false;
1206 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1207 description.mSampleRate = (Float64) sampleRate;
1208 updateFormat = true;
1211 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1212 description.mFormatID = kAudioFormatLinearPCM;
1213 updateFormat = true;
1216 if ( updateFormat ) {
1217 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1218 if ( result != noErr ) {
1219 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1220 errorText_ = errorStream_.str();
1225 // Now check the physical format.
1226 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1227 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1228 if ( result != noErr ) {
1229 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1230 errorText_ = errorStream_.str();
1234 //std::cout << "Current physical stream format:" << std::endl;
1235 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1236 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1237 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1238 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
1240 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1241 description.mFormatID = kAudioFormatLinearPCM;
1242 //description.mSampleRate = (Float64) sampleRate;
1243 AudioStreamBasicDescription testDescription = description;
1246 // We'll try higher bit rates first and then work our way down.
// NOTE(review): the vector is declared with pair<UInt32, UInt32> elements but
// is filled with pair<Float32, UInt32> values below, so the fractional .first
// values (24.2 / 24.4, used to distinguish the two unpacked 24-bit layouts)
// are truncated to 24 on insertion.  Upstream RtAudio declares this as
// std::vector< std::pair<Float32, UInt32> > — confirm and align.
1247 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1253 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1255 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1261 bool setPhysicalFormat = false;
1262 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1263 testDescription = description;
1264 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1265 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' (bitwise NOT) on the masked flag yields a non-zero value
// whether or not kAudioFormatFlagIsPacked is set, so this condition is
// effectively always true for 24-bit entries; '!' appears to be the intent.
// Confirm against upstream before changing — behavior may be relied upon.
1266 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1267 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1270 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1271 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1272 if ( result == noErr ) {
1273 setPhysicalFormat = true;
1274 //std::cout << "Updated physical stream format:" << std::endl;
1275 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1276 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1277 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1278 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1283 if ( !setPhysicalFormat ) {
1284 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1285 errorText_ = errorStream_.str();
1288 } // done setting virtual/physical formats.
1290 // Get the stream / device latency.
1292 dataSize = sizeof( UInt32 );
1293 property.mSelector = kAudioDevicePropertyLatency;
1294 if ( AudioObjectHasProperty( id, &property ) == true ) {
1295 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1296 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1298 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1299 errorText_ = errorStream_.str();
1300 error( RtAudioError::WARNING );
1304 // Byte-swapping: According to AudioHardware.h, the stream data will
1305 // always be presented in native-endian format, so we should never
1306 // need to byte swap.
1307 stream_.doByteSwap[mode] = false;
1309 // From the CoreAudio documentation, PCM data must be supplied as
1311 stream_.userFormat = format;
1312 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1314 if ( streamCount == 1 )
1315 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1316 else // multiple streams
1317 stream_.nDeviceChannels[mode] = channels;
1318 stream_.nUserChannels[mode] = channels;
1319 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1320 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1321 else stream_.userInterleaved = true;
1322 stream_.deviceInterleaved[mode] = true;
1323 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1325 // Set flags for buffer conversion.
1326 stream_.doConvertBuffer[mode] = false;
1327 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1330 stream_.doConvertBuffer[mode] = true;
1331 if ( streamCount == 1 ) {
1332 if ( stream_.nUserChannels[mode] > 1 &&
1333 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1334 stream_.doConvertBuffer[mode] = true;
1336 else if ( monoMode && stream_.userInterleaved )
1337 stream_.doConvertBuffer[mode] = true;
1339 // Allocate our CoreHandle structure for the stream.
1340 CoreHandle *handle = 0;
1341 if ( stream_.apiHandle == 0 ) {
1343 handle = new CoreHandle;
1345 catch ( std::bad_alloc& ) {
1346 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1350 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1351 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1354 stream_.apiHandle = (void *) handle;
1357 handle = (CoreHandle *) stream_.apiHandle;
1358 handle->iStream[mode] = firstStream;
1359 handle->nStreams[mode] = streamCount;
1360 handle->id[mode] = id;
1362 // Allocate necessary internal buffers.
1363 unsigned long bufferBytes;
1364 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1365 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): memset runs BEFORE the NULL check below — if malloc fails
// this dereferences NULL.  The check should precede the memset (or the
// commented-out calloc above should be used instead); flag for a real fix.
1366 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1367 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1368 if ( stream_.userBuffer[mode] == NULL ) {
1369 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1373 // If possible, we will make use of the CoreAudio stream buffers as
1374 // "device buffers".  However, we can't do this if using multiple
1376 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1378 bool makeBuffer = true;
1379 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1380 if ( mode == INPUT ) {
1381 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1382 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1383 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1388 bufferBytes *= *bufferSize;
1389 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1390 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1391 if ( stream_.deviceBuffer == NULL ) {
1392 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1398 stream_.sampleRate = sampleRate;
1399 stream_.device[mode] = device;
1400 stream_.state = STREAM_STOPPED;
1401 stream_.callbackInfo.object = (void *) this;
1403 // Setup the buffer conversion information structure.
1404 if ( stream_.doConvertBuffer[mode] ) {
1405 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1406 else setConvertInfo( mode, channelOffset );
1409 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1410 // Only one callback procedure per device.
1411 stream_.mode = DUPLEX;
1413 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1414 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1416 // deprecated in favor of AudioDeviceCreateIOProcID()
1417 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1419 if ( result != noErr ) {
1420 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1421 errorText_ = errorStream_.str();
1424 if ( stream_.mode == OUTPUT && mode == INPUT )
1425 stream_.mode = DUPLEX;
1427 stream_.mode = mode;
1430 // Setup the device property listener for over/underload.
1431 property.mSelector = kAudioDeviceProcessorOverload;
1432 property.mScope = kAudioObjectPropertyScopeGlobal;
1433 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error-path cleanup: tear down the condition variable, handle, and any
// allocated buffers, then mark the stream closed.
1439 pthread_cond_destroy( &handle->condition );
1441 stream_.apiHandle = 0;
1444 for ( int i=0; i<2; i++ ) {
1445 if ( stream_.userBuffer[i] ) {
1446 free( stream_.userBuffer[i] );
1447 stream_.userBuffer[i] = 0;
1451 if ( stream_.deviceBuffer ) {
1452 free( stream_.deviceBuffer );
1453 stream_.deviceBuffer = 0;
1456 stream_.state = STREAM_CLOSED;
// Close the open stream: for each direction in use, remove the processor-
// overload (xrun) property listener and stop/destroy the IOProc on the
// output device (handle->id[0]) and, when a distinct input device is in use,
// on the input device (handle->id[1]); then free the user/device buffers,
// destroy the condition variable, and reset the stream bookkeeping.
// NOTE(review): excerpt is missing interleaved lines (#endif, braces), so
// the visible text is not the complete function body.
1460 void RtApiCore :: closeStream( void )
1462 if ( stream_.state == STREAM_CLOSED ) {
1463 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1464 error( RtAudioError::WARNING );
1468 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1471 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1472 kAudioObjectPropertyScopeGlobal,
1473 kAudioObjectPropertyElementMaster };
1475 property.mSelector = kAudioDeviceProcessorOverload;
1476 property.mScope = kAudioObjectPropertyScopeGlobal;
1477 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1478 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1479 error( RtAudioError::WARNING );
1482 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1483 if ( stream_.state == STREAM_RUNNING )
1484 AudioDeviceStop( handle->id[0], handle->procId[0] );
1485 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 #else // deprecated behaviour
1487 if ( stream_.state == STREAM_RUNNING )
1488 AudioDeviceStop( handle->id[0], callbackHandler );
1489 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side: only handled separately when not sharing the output device.
1494 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1496 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1497 kAudioObjectPropertyScopeGlobal,
1498 kAudioObjectPropertyElementMaster };
1500 property.mSelector = kAudioDeviceProcessorOverload;
1501 property.mScope = kAudioObjectPropertyScopeGlobal;
1502 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1503 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1504 error( RtAudioError::WARNING );
1507 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1508 if ( stream_.state == STREAM_RUNNING )
1509 AudioDeviceStop( handle->id[1], handle->procId[1] );
1510 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1511 #else // deprecated behaviour
1512 if ( stream_.state == STREAM_RUNNING )
1513 AudioDeviceStop( handle->id[1], callbackHandler );
1514 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release per-stream allocations and reset state so the object can be reused.
1519 for ( int i=0; i<2; i++ ) {
1520 if ( stream_.userBuffer[i] ) {
1521 free( stream_.userBuffer[i] );
1522 stream_.userBuffer[i] = 0;
1526 if ( stream_.deviceBuffer ) {
1527 free( stream_.deviceBuffer );
1528 stream_.deviceBuffer = 0;
1531 // Destroy pthread condition variable.
1532 pthread_cond_destroy( &handle->condition );
1534 stream_.apiHandle = 0;
1536 stream_.mode = UNINITIALIZED;
1537 stream_.state = STREAM_CLOSED;
// Start the stream: warn (and bail) if already running, stamp the stream
// start time, then call AudioDeviceStart() for the output device (index 0)
// and, when a distinct input device is in use, for the input device
// (index 1).  On success the drain counters are reset and the state becomes
// STREAM_RUNNING; any CoreAudio failure is reported via error().
1540 void RtApiCore :: startStream( void )
1543 if ( stream_.state == STREAM_RUNNING ) {
1544 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1545 error( RtAudioError::WARNING );
1549 #if defined( HAVE_GETTIMEOFDAY )
1550 gettimeofday( &stream_.lastTickTimestamp, NULL );
1553 OSStatus result = noErr;
1554 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1555 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1557 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1558 result = AudioDeviceStart( handle->id[0], handle->procId[0] );
1559 #else // deprecated behaviour
1560 result = AudioDeviceStart( handle->id[0], callbackHandler );
1562 if ( result != noErr ) {
1563 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1564 errorText_ = errorStream_.str();
// Input side: only started separately when not sharing the output device.
1569 if ( stream_.mode == INPUT ||
1570 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1572 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1573 result = AudioDeviceStart( handle->id[1], handle->procId[1] );
1574 #else // deprecated behaviour
1575 result = AudioDeviceStart( handle->id[1], callbackHandler );
1577 if ( result != noErr ) {
1578 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1579 errorText_ = errorStream_.str();
1584 handle->drainCounter = 0;
1585 handle->internalDrain = false;
1586 stream_.state = STREAM_RUNNING;
1589 if ( result == noErr ) return;
1590 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream's callback procedure(s).  For output/duplex streams, if no
// drain is already in progress (drainCounter == 0), the output is first
// allowed to drain: drainCounter is raised to 2 and this thread blocks on the
// handle's condition variable until callbackEvent() signals that the final
// buffer has played.  AudioDeviceStop() is then issued for the output device
// (index 0) and, when a distinct input device is in use, for the input
// device (index 1).  Errors are reported via error( SYSTEM_ERROR ).
1593 void RtApiCore :: stopStream( void )
1596 if ( stream_.state == STREAM_STOPPED ) {
1597 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1598 error( RtAudioError::WARNING );
1602 OSStatus result = noErr;
1603 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1604 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1606 if ( handle->drainCounter == 0 ) {
1607 handle->drainCounter = 2;
1608 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1611 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1612 result = AudioDeviceStop( handle->id[0], handle->procId[0] );
1613 #else // deprecated behaviour
1614 result = AudioDeviceStop( handle->id[0], callbackHandler );
1616 if ( result != noErr ) {
1617 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1618 errorText_ = errorStream_.str();
1623 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1625 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
// BUG FIX: the input IOProc (procId[1]) must be stopped on the INPUT device
// id[1], not id[0].  The original passed handle->id[0], so for duplex
// streams spanning two distinct devices the input device was never stopped
// (and the output device was asked to stop a procId it does not own).  This
// matches the deprecated branch below, which correctly uses id[1], and the
// fix applied in upstream RtAudio.
1626 result = AudioDeviceStop( handle->id[1], handle->procId[1] );
1627 #else // deprecated behaviour
1628 result = AudioDeviceStop( handle->id[1], callbackHandler );
1630 if ( result != noErr ) {
1631 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1632 errorText_ = errorStream_.str();
1637 stream_.state = STREAM_STOPPED;
1640 if ( result == noErr ) return;
1641 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without waiting for the output to drain: setting
// drainCounter to 2 up front makes callbackEvent() write silence instead of
// blocking for a drained-output signal before the stop.
// NOTE(review): the tail of this function is missing from this excerpt.
1644 void RtApiCore :: abortStream( void )
1647 if ( stream_.state == STREAM_STOPPED ) {
1648 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1649 error( RtAudioError::WARNING );
1653 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1654 handle->drainCounter = 2;
1659 // This function will be called by a spawned thread when the user
1660 // callback function signals that the stream should be stopped or
1661 // aborted.  It is better to handle it this way because the
1662 // callbackEvent() function probably should return before the AudioDeviceStop()
1663 // function is called.
// Thread entry point: unpacks the CallbackInfo passed as the pthread arg,
// stops the owning RtApiCore stream, and terminates the thread.
1664 static void *coreStopStream( void *ptr )
1666 CallbackInfo *info = (CallbackInfo *) ptr;
1667 RtApiCore *object = (RtApiCore *) info->object;
1669 object->stopStream();
1670 pthread_exit( NULL );
1673 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1674 const AudioBufferList *inBufferList,
1675 const AudioBufferList *outBufferList )
1677 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1678 if ( stream_.state == STREAM_CLOSED ) {
1679 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1680 error( RtAudioError::WARNING );
1684 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1685 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1687 // Check if we were draining the stream and signal is finished.
1688 if ( handle->drainCounter > 3 ) {
1689 ThreadHandle threadId;
1691 stream_.state = STREAM_STOPPING;
1692 if ( handle->internalDrain == true )
1693 pthread_create( &threadId, NULL, coreStopStream, info );
1694 else // external call to stopStream()
1695 pthread_cond_signal( &handle->condition );
1699 AudioDeviceID outputDevice = handle->id[0];
1701 // Invoke user callback to get fresh output data UNLESS we are
1702 // draining stream or duplex mode AND the input/output devices are
1703 // different AND this function is called for the input device.
1704 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1705 RtAudioCallback callback = (RtAudioCallback) info->callback;
1706 double streamTime = getStreamTime();
1707 RtAudioStreamStatus status = 0;
1708 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1709 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1710 handle->xrun[0] = false;
1712 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1713 status |= RTAUDIO_INPUT_OVERFLOW;
1714 handle->xrun[1] = false;
1717 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1718 stream_.bufferSize, streamTime, status, info->userData );
1719 if ( cbReturnValue == 2 ) {
1720 stream_.state = STREAM_STOPPING;
1721 handle->drainCounter = 2;
1725 else if ( cbReturnValue == 1 ) {
1726 handle->drainCounter = 1;
1727 handle->internalDrain = true;
1731 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1733 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1735 if ( handle->nStreams[0] == 1 ) {
1736 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1738 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1740 else { // fill multiple streams with zeros
1741 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1742 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1744 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1748 else if ( handle->nStreams[0] == 1 ) {
1749 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1750 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1751 stream_.userBuffer[0], stream_.convertInfo[0] );
1753 else { // copy from user buffer
1754 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1755 stream_.userBuffer[0],
1756 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1759 else { // fill multiple streams
1760 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1761 if ( stream_.doConvertBuffer[0] ) {
1762 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1763 inBuffer = (Float32 *) stream_.deviceBuffer;
1766 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1767 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1768 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1769 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1770 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1773 else { // fill multiple multi-channel streams with interleaved data
1774 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1777 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1778 UInt32 inChannels = stream_.nUserChannels[0];
1779 if ( stream_.doConvertBuffer[0] ) {
1780 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1781 inChannels = stream_.nDeviceChannels[0];
1784 if ( inInterleaved ) inOffset = 1;
1785 else inOffset = stream_.bufferSize;
1787 channelsLeft = inChannels;
1788 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1790 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1791 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1794 // Account for possible channel offset in first stream
1795 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1796 streamChannels -= stream_.channelOffset[0];
1797 outJump = stream_.channelOffset[0];
1801 // Account for possible unfilled channels at end of the last stream
1802 if ( streamChannels > channelsLeft ) {
1803 outJump = streamChannels - channelsLeft;
1804 streamChannels = channelsLeft;
1807 // Determine input buffer offsets and skips
1808 if ( inInterleaved ) {
1809 inJump = inChannels;
1810 in += inChannels - channelsLeft;
1814 in += (inChannels - channelsLeft) * inOffset;
1817 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1818 for ( unsigned int j=0; j<streamChannels; j++ ) {
1819 *out++ = in[j*inOffset];
1824 channelsLeft -= streamChannels;
1830 // Don't bother draining input
1831 if ( handle->drainCounter ) {
1832 handle->drainCounter++;
1836 AudioDeviceID inputDevice;
1837 inputDevice = handle->id[1];
1838 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1840 if ( handle->nStreams[1] == 1 ) {
1841 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1842 convertBuffer( stream_.userBuffer[1],
1843 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1844 stream_.convertInfo[1] );
1846 else { // copy to user buffer
1847 memcpy( stream_.userBuffer[1],
1848 inBufferList->mBuffers[handle->iStream[1]].mData,
1849 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1852 else { // read from multiple streams
1853 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1854 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1856 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1857 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1858 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1859 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1860 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1863 else { // read from multiple multi-channel streams
1864 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1867 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1868 UInt32 outChannels = stream_.nUserChannels[1];
1869 if ( stream_.doConvertBuffer[1] ) {
1870 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1871 outChannels = stream_.nDeviceChannels[1];
1874 if ( outInterleaved ) outOffset = 1;
1875 else outOffset = stream_.bufferSize;
1877 channelsLeft = outChannels;
1878 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1880 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1881 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1884 // Account for possible channel offset in first stream
1885 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1886 streamChannels -= stream_.channelOffset[1];
1887 inJump = stream_.channelOffset[1];
1891 // Account for possible unread channels at end of the last stream
1892 if ( streamChannels > channelsLeft ) {
1893 inJump = streamChannels - channelsLeft;
1894 streamChannels = channelsLeft;
1897 // Determine output buffer offsets and skips
1898 if ( outInterleaved ) {
1899 outJump = outChannels;
1900 out += outChannels - channelsLeft;
1904 out += (outChannels - channelsLeft) * outOffset;
1907 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1908 for ( unsigned int j=0; j<streamChannels; j++ ) {
1909 out[j*outOffset] = *in++;
1914 channelsLeft -= streamChannels;
1918 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1919 convertBuffer( stream_.userBuffer[1],
1920 stream_.deviceBuffer,
1921 stream_.convertInfo[1] );
1927 //MUTEX_UNLOCK( &stream_.mutex );
1929 // Make sure to only tick duplex stream time once if using two devices
1930 if ( stream_.mode != DUPLEX || (stream_.mode == DUPLEX && handle->id[0] != handle->id[1] && deviceId == handle->id[0] ) )
1931 RtApi::tickStreamTime();
1936 const char* RtApiCore :: getErrorCode( OSStatus code )
1940 case kAudioHardwareNotRunningError:
1941 return "kAudioHardwareNotRunningError";
1943 case kAudioHardwareUnspecifiedError:
1944 return "kAudioHardwareUnspecifiedError";
1946 case kAudioHardwareUnknownPropertyError:
1947 return "kAudioHardwareUnknownPropertyError";
1949 case kAudioHardwareBadPropertySizeError:
1950 return "kAudioHardwareBadPropertySizeError";
1952 case kAudioHardwareIllegalOperationError:
1953 return "kAudioHardwareIllegalOperationError";
1955 case kAudioHardwareBadObjectError:
1956 return "kAudioHardwareBadObjectError";
1958 case kAudioHardwareBadDeviceError:
1959 return "kAudioHardwareBadDeviceError";
1961 case kAudioHardwareBadStreamError:
1962 return "kAudioHardwareBadStreamError";
1964 case kAudioHardwareUnsupportedOperationError:
1965 return "kAudioHardwareUnsupportedOperationError";
1967 case kAudioDeviceUnsupportedFormatError:
1968 return "kAudioDeviceUnsupportedFormatError";
1970 case kAudioDevicePermissionsError:
1971 return "kAudioDevicePermissionsError";
1974 return "CoreAudio unknown error";
1978 //******************** End of __MACOSX_CORE__ *********************//
1981 #if defined(__UNIX_JACK__)
1983 // JACK is a low-latency audio server, originally written for the
1984 // GNU/Linux operating system and now also ported to OS-X. It can
1985 // connect a number of different applications to an audio device, as
1986 // well as allowing them to share audio between themselves.
1988 // When using JACK with RtAudio, "devices" refer to JACK clients that
1989 // have ports connected to the server. The JACK server is typically
1990 // started in a terminal as follows:
//   $ jackd -d alsa -d hw:0
1994 // or through an interface program such as qjackctl. Many of the
1995 // parameters normally set for a stream are fixed by the JACK server
1996 // and can be specified when the JACK server is started. In
//   $ jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
2001 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
2002 // frames, and number of buffers = 4. Once the server is running, it
2003 // is not possible to override these values. If the values are not
2004 // specified in the command-line, the JACK server uses default values.
2006 // The JACK server does not have to be running when an instance of
2007 // RtApiJack is created, though the function getDeviceCount() will
2008 // report 0 devices found until JACK has been started. When no
2009 // devices are available (i.e., the JACK server is not running), a
2010 // stream cannot be opened.
2012 #include <jack/jack.h>
2016 // A structure to hold various information related to the Jack API
2019 jack_client_t *client;
2020 jack_port_t **ports[2];
2021 std::string deviceName[2];
2023 pthread_cond_t condition;
2024 int drainCounter; // Tracks callback counts when draining
2025 bool internalDrain; // Indicates if stop is initiated from callback or not.
2028 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op Jack error callback: silences Jack's internal error reporting
// in non-debug builds (installed in the RtApiJack constructor).
static void jackSilentError( const char * ) {}
#endif
2035 RtApiJack :: RtApiJack()
2036 :shouldAutoconnect_(true) {
2037 // Nothing to do here.
2038 #if !defined(__RTAUDIO_DEBUG__)
2039 // Turn off Jack's internal error reporting.
2040 jack_set_error_function( &jackSilentError );
2044 RtApiJack :: ~RtApiJack()
2046 if ( stream_.state != STREAM_CLOSED ) closeStream();
2049 unsigned int RtApiJack :: getDeviceCount( void )
2051 // See if we can become a jack client.
2052 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2053 jack_status_t *status = NULL;
2054 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2055 if ( client == 0 ) return 0;
2058 std::string port, previousPort;
2059 unsigned int nChannels = 0, nDevices = 0;
2060 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2062 // Parse the port names up to the first colon (:).
2065 port = (char *) ports[ nChannels ];
2066 iColon = port.find(":");
2067 if ( iColon != std::string::npos ) {
2068 port = port.substr( 0, iColon + 1 );
2069 if ( port != previousPort ) {
2071 previousPort = port;
2074 } while ( ports[++nChannels] );
2078 jack_client_close( client );
2082 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2084 RtAudio::DeviceInfo info;
2085 info.probed = false;
2087 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2088 jack_status_t *status = NULL;
2089 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2090 if ( client == 0 ) {
2091 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2092 error( RtAudioError::WARNING );
2097 std::string port, previousPort;
2098 unsigned int nPorts = 0, nDevices = 0;
2099 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2101 // Parse the port names up to the first colon (:).
2104 port = (char *) ports[ nPorts ];
2105 iColon = port.find(":");
2106 if ( iColon != std::string::npos ) {
2107 port = port.substr( 0, iColon );
2108 if ( port != previousPort ) {
2109 if ( nDevices == device ) info.name = port;
2111 previousPort = port;
2114 } while ( ports[++nPorts] );
2118 if ( device >= nDevices ) {
2119 jack_client_close( client );
2120 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2121 error( RtAudioError::INVALID_USE );
2125 // Get the current jack server sample rate.
2126 info.sampleRates.clear();
2128 info.preferredSampleRate = jack_get_sample_rate( client );
2129 info.sampleRates.push_back( info.preferredSampleRate );
2131 // Count the available ports containing the client name as device
2132 // channels. Jack "input ports" equal RtAudio output channels.
2133 unsigned int nChannels = 0;
2134 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2136 while ( ports[ nChannels ] ) nChannels++;
2138 info.outputChannels = nChannels;
2141 // Jack "output ports" equal RtAudio input channels.
2143 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2145 while ( ports[ nChannels ] ) nChannels++;
2147 info.inputChannels = nChannels;
2150 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2151 jack_client_close(client);
2152 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2153 error( RtAudioError::WARNING );
2157 // If device opens for both playback and capture, we determine the channels.
2158 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2159 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2161 // Jack always uses 32-bit floats.
2162 info.nativeFormats = RTAUDIO_FLOAT32;
2164 // Jack doesn't provide default devices so we'll use the first available one.
2165 if ( device == 0 && info.outputChannels > 0 )
2166 info.isDefaultOutput = true;
2167 if ( device == 0 && info.inputChannels > 0 )
2168 info.isDefaultInput = true;
2170 jack_client_close(client);
2175 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2177 CallbackInfo *info = (CallbackInfo *) infoPointer;
2179 RtApiJack *object = (RtApiJack *) info->object;
2180 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2185 // This function will be called by a spawned thread when the Jack
2186 // server signals that it is shutting down. It is necessary to handle
2187 // it this way because the jackShutdown() function must return before
2188 // the jack_deactivate() function (in closeStream()) will return.
2189 static void *jackCloseStream( void *ptr )
2191 CallbackInfo *info = (CallbackInfo *) ptr;
2192 RtApiJack *object = (RtApiJack *) info->object;
2194 object->closeStream();
2196 pthread_exit( NULL );
2198 static void jackShutdown( void *infoPointer )
2200 CallbackInfo *info = (CallbackInfo *) infoPointer;
2201 RtApiJack *object = (RtApiJack *) info->object;
2203 // Check current stream state. If stopped, then we'll assume this
2204 // was called as a result of a call to RtApiJack::stopStream (the
2205 // deactivation of a client handle causes this function to be called).
2206 // If not, we'll assume the Jack server is shutting down or some
2207 // other problem occurred and we should close the stream.
2208 if ( object->isStreamRunning() == false ) return;
2210 ThreadHandle threadId;
2211 pthread_create( &threadId, NULL, jackCloseStream, info );
2212 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2215 static int jackXrun( void *infoPointer )
2217 JackHandle *handle = *((JackHandle **) infoPointer);
2219 if ( handle->ports[0] ) handle->xrun[0] = true;
2220 if ( handle->ports[1] ) handle->xrun[1] = true;
2225 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2226 unsigned int firstChannel, unsigned int sampleRate,
2227 RtAudioFormat format, unsigned int *bufferSize,
2228 RtAudio::StreamOptions *options )
2230 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2232 // Look for jack server and try to become a client (only do once per stream).
2233 jack_client_t *client = 0;
2234 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2235 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2236 jack_status_t *status = NULL;
2237 if ( options && !options->streamName.empty() )
2238 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2240 client = jack_client_open( "RtApiJack", jackoptions, status );
2241 if ( client == 0 ) {
2242 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2243 error( RtAudioError::WARNING );
2248 // The handle must have been created on an earlier pass.
2249 client = handle->client;
2253 std::string port, previousPort, deviceName;
2254 unsigned int nPorts = 0, nDevices = 0;
2255 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2257 // Parse the port names up to the first colon (:).
2260 port = (char *) ports[ nPorts ];
2261 iColon = port.find(":");
2262 if ( iColon != std::string::npos ) {
2263 port = port.substr( 0, iColon );
2264 if ( port != previousPort ) {
2265 if ( nDevices == device ) deviceName = port;
2267 previousPort = port;
2270 } while ( ports[++nPorts] );
2274 if ( device >= nDevices ) {
2275 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2279 unsigned long flag = JackPortIsInput;
2280 if ( mode == INPUT ) flag = JackPortIsOutput;
2282 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2283 // Count the available ports containing the client name as device
2284 // channels. Jack "input ports" equal RtAudio output channels.
2285 unsigned int nChannels = 0;
2286 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2288 while ( ports[ nChannels ] ) nChannels++;
2291 // Compare the jack ports for specified client to the requested number of channels.
2292 if ( nChannels < (channels + firstChannel) ) {
2293 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2294 errorText_ = errorStream_.str();
2299 // Check the jack server sample rate.
2300 unsigned int jackRate = jack_get_sample_rate( client );
2301 if ( sampleRate != jackRate ) {
2302 jack_client_close( client );
2303 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2304 errorText_ = errorStream_.str();
2307 stream_.sampleRate = jackRate;
2309 // Get the latency of the JACK port.
2310 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2311 if ( ports[ firstChannel ] ) {
2313 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2314 // the range (usually the min and max are equal)
2315 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2316 // get the latency range
2317 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2318 // be optimistic, use the min!
2319 stream_.latency[mode] = latrange.min;
2320 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2324 // The jack server always uses 32-bit floating-point data.
2325 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2326 stream_.userFormat = format;
2328 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2329 else stream_.userInterleaved = true;
2331 // Jack always uses non-interleaved buffers.
2332 stream_.deviceInterleaved[mode] = false;
2334 // Jack always provides host byte-ordered data.
2335 stream_.doByteSwap[mode] = false;
2337 // Get the buffer size. The buffer size and number of buffers
2338 // (periods) is set when the jack server is started.
2339 stream_.bufferSize = (int) jack_get_buffer_size( client );
2340 *bufferSize = stream_.bufferSize;
2342 stream_.nDeviceChannels[mode] = channels;
2343 stream_.nUserChannels[mode] = channels;
2345 // Set flags for buffer conversion.
2346 stream_.doConvertBuffer[mode] = false;
2347 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2348 stream_.doConvertBuffer[mode] = true;
2349 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2350 stream_.nUserChannels[mode] > 1 )
2351 stream_.doConvertBuffer[mode] = true;
2353 // Allocate our JackHandle structure for the stream.
2354 if ( handle == 0 ) {
2356 handle = new JackHandle;
2358 catch ( std::bad_alloc& ) {
2359 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2363 if ( pthread_cond_init(&handle->condition, NULL) ) {
2364 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2367 stream_.apiHandle = (void *) handle;
2368 handle->client = client;
2370 handle->deviceName[mode] = deviceName;
2372 // Allocate necessary internal buffers.
2373 unsigned long bufferBytes;
2374 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2375 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2376 if ( stream_.userBuffer[mode] == NULL ) {
2377 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2381 if ( stream_.doConvertBuffer[mode] ) {
2383 bool makeBuffer = true;
2384 if ( mode == OUTPUT )
2385 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2386 else { // mode == INPUT
2387 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2388 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2389 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2390 if ( bufferBytes < bytesOut ) makeBuffer = false;
2395 bufferBytes *= *bufferSize;
2396 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2397 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2398 if ( stream_.deviceBuffer == NULL ) {
2399 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2405 // Allocate memory for the Jack ports (channels) identifiers.
2406 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2407 if ( handle->ports[mode] == NULL ) {
2408 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2412 stream_.device[mode] = device;
2413 stream_.channelOffset[mode] = firstChannel;
2414 stream_.state = STREAM_STOPPED;
2415 stream_.callbackInfo.object = (void *) this;
2417 if ( stream_.mode == OUTPUT && mode == INPUT )
2418 // We had already set up the stream for output.
2419 stream_.mode = DUPLEX;
2421 stream_.mode = mode;
2422 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2423 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2424 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2427 // Register our ports.
2429 if ( mode == OUTPUT ) {
2430 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2431 snprintf( label, 64, "outport %d", i );
2432 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2433 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2437 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2438 snprintf( label, 64, "inport %d", i );
2439 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2440 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2444 // Setup the buffer conversion information structure. We don't use
2445 // buffers to do channel offsets, so we override that parameter
2447 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2449 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2455 pthread_cond_destroy( &handle->condition );
2456 jack_client_close( handle->client );
2458 if ( handle->ports[0] ) free( handle->ports[0] );
2459 if ( handle->ports[1] ) free( handle->ports[1] );
2462 stream_.apiHandle = 0;
2465 for ( int i=0; i<2; i++ ) {
2466 if ( stream_.userBuffer[i] ) {
2467 free( stream_.userBuffer[i] );
2468 stream_.userBuffer[i] = 0;
2472 if ( stream_.deviceBuffer ) {
2473 free( stream_.deviceBuffer );
2474 stream_.deviceBuffer = 0;
2480 void RtApiJack :: closeStream( void )
2482 if ( stream_.state == STREAM_CLOSED ) {
2483 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2484 error( RtAudioError::WARNING );
2488 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2491 if ( stream_.state == STREAM_RUNNING )
2492 jack_deactivate( handle->client );
2494 jack_client_close( handle->client );
2498 if ( handle->ports[0] ) free( handle->ports[0] );
2499 if ( handle->ports[1] ) free( handle->ports[1] );
2500 pthread_cond_destroy( &handle->condition );
2502 stream_.apiHandle = 0;
2505 for ( int i=0; i<2; i++ ) {
2506 if ( stream_.userBuffer[i] ) {
2507 free( stream_.userBuffer[i] );
2508 stream_.userBuffer[i] = 0;
2512 if ( stream_.deviceBuffer ) {
2513 free( stream_.deviceBuffer );
2514 stream_.deviceBuffer = 0;
2517 stream_.mode = UNINITIALIZED;
2518 stream_.state = STREAM_CLOSED;
2521 void RtApiJack :: startStream( void )
2524 if ( stream_.state == STREAM_RUNNING ) {
2525 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2526 error( RtAudioError::WARNING );
2530 #if defined( HAVE_GETTIMEOFDAY )
2531 gettimeofday( &stream_.lastTickTimestamp, NULL );
2534 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2535 int result = jack_activate( handle->client );
2537 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2543 // Get the list of available ports.
2544 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2546 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2547 if ( ports == NULL) {
2548 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2552 // Now make the port connections. Since RtAudio wasn't designed to
2553 // allow the user to select particular channels of a device, we'll
2554 // just open the first "nChannels" ports with offset.
2555 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2557 if ( ports[ stream_.channelOffset[0] + i ] )
2558 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2561 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2568 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2570 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2571 if ( ports == NULL) {
2572 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2576 // Now make the port connections. See note above.
2577 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2579 if ( ports[ stream_.channelOffset[1] + i ] )
2580 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2583 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2590 handle->drainCounter = 0;
2591 handle->internalDrain = false;
2592 stream_.state = STREAM_RUNNING;
2595 if ( result == 0 ) return;
2596 error( RtAudioError::SYSTEM_ERROR );
2599 void RtApiJack :: stopStream( void )
2602 if ( stream_.state == STREAM_STOPPED ) {
2603 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2604 error( RtAudioError::WARNING );
2608 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2609 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2611 if ( handle->drainCounter == 0 ) {
2612 handle->drainCounter = 2;
2613 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2617 jack_deactivate( handle->client );
2618 stream_.state = STREAM_STOPPED;
2621 void RtApiJack :: abortStream( void )
2624 if ( stream_.state == STREAM_STOPPED ) {
2625 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2626 error( RtAudioError::WARNING );
2630 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2631 handle->drainCounter = 2;
2636 // This function will be called by a spawned thread when the user
2637 // callback function signals that the stream should be stopped or
2638 // aborted. It is necessary to handle it this way because the
2639 // callbackEvent() function must return before the jack_deactivate()
2640 // function will return.
2641 static void *jackStopStream( void *ptr )
2643 CallbackInfo *info = (CallbackInfo *) ptr;
2644 RtApiJack *object = (RtApiJack *) info->object;
2646 object->stopStream();
2647 pthread_exit( NULL );
2650 bool RtApiJack :: callbackEvent( unsigned long nframes )
2652 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2653 if ( stream_.state == STREAM_CLOSED ) {
2654 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2655 error( RtAudioError::WARNING );
2658 if ( stream_.bufferSize != nframes ) {
2659 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2660 error( RtAudioError::WARNING );
2664 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2665 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2667 // Check if we were draining the stream and signal is finished.
2668 if ( handle->drainCounter > 3 ) {
2669 ThreadHandle threadId;
2671 stream_.state = STREAM_STOPPING;
2672 if ( handle->internalDrain == true )
2673 pthread_create( &threadId, NULL, jackStopStream, info );
2675 pthread_cond_signal( &handle->condition );
2679 // Invoke user callback first, to get fresh output data.
2680 if ( handle->drainCounter == 0 ) {
2681 RtAudioCallback callback = (RtAudioCallback) info->callback;
2682 double streamTime = getStreamTime();
2683 RtAudioStreamStatus status = 0;
2684 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2685 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2686 handle->xrun[0] = false;
2688 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2689 status |= RTAUDIO_INPUT_OVERFLOW;
2690 handle->xrun[1] = false;
2692 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2693 stream_.bufferSize, streamTime, status, info->userData );
2694 if ( cbReturnValue == 2 ) {
2695 stream_.state = STREAM_STOPPING;
2696 handle->drainCounter = 2;
2698 pthread_create( &id, NULL, jackStopStream, info );
2701 else if ( cbReturnValue == 1 ) {
2702 handle->drainCounter = 1;
2703 handle->internalDrain = true;
2707 jack_default_audio_sample_t *jackbuffer;
2708 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2709 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2711 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2713 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2714 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2715 memset( jackbuffer, 0, bufferBytes );
2719 else if ( stream_.doConvertBuffer[0] ) {
2721 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2723 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2724 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2725 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2728 else { // no buffer conversion
2729 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2730 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2731 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2736 // Don't bother draining input
2737 if ( handle->drainCounter ) {
2738 handle->drainCounter++;
2742 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2744 if ( stream_.doConvertBuffer[1] ) {
2745 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2746 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2747 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2749 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2751 else { // no buffer conversion
2752 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2753 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2754 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2760 RtApi::tickStreamTime();
2763 //******************** End of __UNIX_JACK__ *********************//
2766 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2768 // The ASIO API is designed around a callback scheme, so this
2769 // implementation is similar to that used for OS-X CoreAudio and Linux
2770 // Jack. The primary constraint with ASIO is that it only allows
2771 // access to a single driver at a time. Thus, it is not possible to
2772 // have more than one simultaneous RtAudio stream.
2774 // This implementation also requires a number of external ASIO files
2775 // and a few global variables. The ASIO callback scheme does not
2776 // allow for the passing of user data, so we must create a global
2777 // pointer to our callbackInfo structure.
2779 // On unix systems, we make use of a pthread condition variable.
2780 // Since there is no equivalent in Windows, I hacked something based
2781 // on information found in
2782 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2784 #include "asiosys.h"
2786 #include "iasiothiscallresolver.h"
2787 #include "asiodrivers.h"
2790 static AsioDrivers drivers;
2791 static ASIOCallbacks asioCallbacks;
2792 static ASIODriverInfo driverInfo;
2793 static CallbackInfo *asioCallbackInfo;
2794 static bool asioXRun;
2797 int drainCounter; // Tracks callback counts when draining
2798 bool internalDrain; // Indicates if stop is initiated from callback or not.
2799 ASIOBufferInfo *bufferInfos;
2803 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2806 // Function declarations (definitions at end of section)
2807 static const char* getAsioErrorString( ASIOError result );
2808 static void sampleRateChanged( ASIOSampleRate sRate );
2809 static long asioMessages( long selector, long value, void* message, double* opt );
2811 RtApiAsio :: RtApiAsio()
2813 // ASIO cannot run on a multi-threaded appartment. You can call
2814 // CoInitialize beforehand, but it must be for appartment threading
2815 // (in which case, CoInitilialize will return S_FALSE here).
2816 coInitialized_ = false;
2817 HRESULT hr = CoInitialize( NULL );
2819 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2820 error( RtAudioError::WARNING );
2822 coInitialized_ = true;
2824 drivers.removeCurrentDriver();
2825 driverInfo.asioVersion = 2;
2827 // See note in DirectSound implementation about GetDesktopWindow().
2828 driverInfo.sysRef = GetForegroundWindow();
2831 RtApiAsio :: ~RtApiAsio()
2833 if ( stream_.state != STREAM_CLOSED ) closeStream();
2834 if ( coInitialized_ ) CoUninitialize();
2837 unsigned int RtApiAsio :: getDeviceCount( void )
2839 return (unsigned int) drivers.asioGetNumDev();
// Probes device `device` and returns its capabilities (channels, sample rates,
// native formats). Because ASIO allows only one loaded driver at a time, when a
// stream is open this returns the results cached by saveDeviceInfo() instead of
// probing. Failures are reported as warnings and leave info.probed == false.
2842 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2844 RtAudio::DeviceInfo info;
2845 info.probed = false;
2848 unsigned int nDevices = getDeviceCount();
2849 if ( nDevices == 0 ) {
2850 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2851 error( RtAudioError::INVALID_USE );
// Validate the device index against the enumerated count.
2855 if ( device >= nDevices ) {
2856 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2857 error( RtAudioError::INVALID_USE );
2861 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2862 if ( stream_.state != STREAM_CLOSED ) {
2863 if ( device >= devices_.size() ) {
2864 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2865 error( RtAudioError::WARNING );
2868 return devices_[ device ];
// Look up the driver's name, then load and initialize it for probing.
2871 char driverName[32];
2872 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2873 if ( result != ASE_OK ) {
2874 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2875 errorText_ = errorStream_.str();
2876 error( RtAudioError::WARNING );
2880 info.name = driverName;
2882 if ( !drivers.loadDriver( driverName ) ) {
2883 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2884 errorText_ = errorStream_.str();
2885 error( RtAudioError::WARNING );
2889 result = ASIOInit( &driverInfo );
2890 if ( result != ASE_OK ) {
2891 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2892 errorText_ = errorStream_.str();
2893 error( RtAudioError::WARNING );
2897 // Determine the device channel information.
2898 long inputChannels, outputChannels;
2899 result = ASIOGetChannels( &inputChannels, &outputChannels );
2900 if ( result != ASE_OK ) {
2901 drivers.removeCurrentDriver();
2902 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2903 errorText_ = errorStream_.str();
2904 error( RtAudioError::WARNING );
2908 info.outputChannels = outputChannels;
2909 info.inputChannels = inputChannels;
// Duplex capacity is limited by the smaller of the two channel counts.
2910 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2911 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2913 // Determine the supported sample rates.
2914 info.sampleRates.clear();
2915 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2916 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2917 if ( result == ASE_OK ) {
2918 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2920 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2921 info.preferredSampleRate = SAMPLE_RATES[i];
2925 // Determine supported data types ... just check first channel and assume rest are the same.
2926 ASIOChannelInfo channelInfo;
2927 channelInfo.channel = 0;
2928 channelInfo.isInput = true;
2929 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2930 result = ASIOGetChannelInfo( &channelInfo );
2931 if ( result != ASE_OK ) {
2932 drivers.removeCurrentDriver();
2933 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2934 errorText_ = errorStream_.str();
2935 error( RtAudioError::WARNING );
// Translate the ASIO sample type into RtAudio's native-format bitmask.
2939 info.nativeFormats = 0;
2940 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2941 info.nativeFormats |= RTAUDIO_SINT16;
2942 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2943 info.nativeFormats |= RTAUDIO_SINT32;
2944 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2945 info.nativeFormats |= RTAUDIO_FLOAT32;
2946 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2947 info.nativeFormats |= RTAUDIO_FLOAT64;
2948 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2949 info.nativeFormats |= RTAUDIO_SINT24;
2951 if ( info.outputChannels > 0 )
2952 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2953 if ( info.inputChannels > 0 )
2954 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver so subsequent probes/opens start from a clean state.
2957 drivers.removeCurrentDriver();
// ASIO buffer-switch callback: forwards the half-buffer index to the stream
// object's callbackEvent(). `asioCallbackInfo` was set in probeDeviceOpen().
2961 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2963 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2964 object->callbackEvent( index );
// Caches DeviceInfo for every device so getDeviceInfo() can answer while a
// stream is open (ASIO cannot probe other drivers while one is loaded).
2967 void RtApiAsio :: saveDeviceInfo( void )
2971 unsigned int nDevices = getDeviceCount();
2972 devices_.resize( nDevices );
2973 for ( unsigned int i=0; i<nDevices; i++ )
2974 devices_[i] = getDeviceInfo( i );
// Opens (or, for the input half of a duplex stream, re-opens) an ASIO stream on
// `device`: loads/initializes the driver, validates channel counts and sample
// rate, negotiates a buffer size, creates ASIO buffers, and allocates the
// user/device conversion buffers. Returns FAILURE via the `error` label path
// (elided here) with cleanup for the non-duplex-input case.
// Params: mode OUTPUT/INPUT; channels+firstChannel requested on that side;
// *bufferSize is in/out (driver may substitute its preferred size).
2977 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2978 unsigned int firstChannel, unsigned int sampleRate,
2979 RtAudioFormat format, unsigned int *bufferSize,
2980 RtAudio::StreamOptions *options )
2981 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2983 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2985 // For ASIO, a duplex stream MUST use the same driver.
2986 if ( isDuplexInput && stream_.device[0] != device ) {
2987 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2991 char driverName[32];
2992 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2993 if ( result != ASE_OK ) {
2994 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2995 errorText_ = errorStream_.str();
2999 // Only load the driver once for duplex stream.
3000 if ( !isDuplexInput ) {
3001 // The getDeviceInfo() function will not work when a stream is open
3002 // because ASIO does not allow multiple devices to run at the same
3003 // time. Thus, we'll probe the system before opening a stream and
3004 // save the results for use by getDeviceInfo().
3005 this->saveDeviceInfo();
3007 if ( !drivers.loadDriver( driverName ) ) {
3008 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
3009 errorText_ = errorStream_.str();
3013 result = ASIOInit( &driverInfo );
3014 if ( result != ASE_OK ) {
3015 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3016 errorText_ = errorStream_.str();
3021 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3022 bool buffersAllocated = false;
3023 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3024 unsigned int nChannels;
3027 // Check the device channel count.
3028 long inputChannels, outputChannels;
3029 result = ASIOGetChannels( &inputChannels, &outputChannels );
3030 if ( result != ASE_OK ) {
3031 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3032 errorText_ = errorStream_.str();
3036 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3037 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3038 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3039 errorText_ = errorStream_.str();
3042 stream_.nDeviceChannels[mode] = channels;
3043 stream_.nUserChannels[mode] = channels;
3044 stream_.channelOffset[mode] = firstChannel;
3046 // Verify the sample rate is supported.
3047 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3048 if ( result != ASE_OK ) {
3049 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3050 errorText_ = errorStream_.str();
3054 // Get the current sample rate
3055 ASIOSampleRate currentRate;
// NOTE(review): '¤tRate' on the next line is mojibake — '&current' was
// corrupted into the '¤' character (HTML entity '&curren;' mangling). The
// call should read: result = ASIOGetSampleRate( &currentRate );
3056 result = ASIOGetSampleRate( ¤tRate );
3057 if ( result != ASE_OK ) {
3058 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3059 errorText_ = errorStream_.str();
3063 // Set the sample rate only if necessary
3064 if ( currentRate != sampleRate ) {
3065 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3066 if ( result != ASE_OK ) {
3067 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3068 errorText_ = errorStream_.str();
3073 // Determine the driver data type.
3074 ASIOChannelInfo channelInfo;
3075 channelInfo.channel = 0;
3076 if ( mode == OUTPUT ) channelInfo.isInput = false;
3077 else channelInfo.isInput = true;
3078 result = ASIOGetChannelInfo( &channelInfo );
3079 if ( result != ASE_OK ) {
3080 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3081 errorText_ = errorStream_.str();
3085 // Assuming WINDOWS host is always little-endian.
3086 stream_.doByteSwap[mode] = false;
3087 stream_.userFormat = format;
3088 stream_.deviceFormat[mode] = 0;
// Map the ASIO sample type to an RtAudio format; MSB variants need byte swapping.
3089 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3090 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3091 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3093 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3094 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3095 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3097 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3098 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3099 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3101 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3102 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3103 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3105 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3106 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3107 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3110 if ( stream_.deviceFormat[mode] == 0 ) {
3111 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3112 errorText_ = errorStream_.str();
3116 // Set the buffer size. For a duplex stream, this will end up
3117 // setting the buffer size based on the input constraints, which
3119 long minSize, maxSize, preferSize, granularity;
3120 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3121 if ( result != ASE_OK ) {
3122 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3123 errorText_ = errorStream_.str();
3127 if ( isDuplexInput ) {
3128 // When this is the duplex input (output was opened before), then we have to use the same
3129 // buffersize as the output, because it might use the preferred buffer size, which most
3130 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3131 // So instead of throwing an error, make them equal. The caller uses the reference
3132 // to the "bufferSize" param as usual to set up processing buffers.
3134 *bufferSize = stream_.bufferSize;
// Clamp the requested size to the driver's [min, max] range; 0 means "use preferred".
3137 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3138 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3139 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3140 else if ( granularity == -1 ) {
3141 // Make sure bufferSize is a power of two.
3142 int log2_of_min_size = 0;
3143 int log2_of_max_size = 0;
// Find the highest set bit of minSize and maxSize (i.e. floor(log2(...))).
3145 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3146 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3147 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Choose the power of two closest to the requested size.
3150 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3151 int min_delta_num = log2_of_min_size;
3153 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3154 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3155 if (current_delta < min_delta) {
3156 min_delta = current_delta;
3161 *bufferSize = ( (unsigned int)1 << min_delta_num );
3162 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3163 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3165 else if ( granularity != 0 ) {
3166 // Set to an even multiple of granularity, rounding up.
3167 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3172 // we don't use it anymore, see above!
3173 // Just left it here for the case...
3174 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3175 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3180 stream_.bufferSize = *bufferSize;
3181 stream_.nBuffers = 2;
3183 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3184 else stream_.userInterleaved = true;
3186 // ASIO always uses non-interleaved buffers.
3187 stream_.deviceInterleaved[mode] = false;
3189 // Allocate, if necessary, our AsioHandle structure for the stream.
3190 if ( handle == 0 ) {
3192 handle = new AsioHandle;
3194 catch ( std::bad_alloc& ) {
3195 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3198 handle->bufferInfos = 0;
3200 // Create a manual-reset event.
3201 handle->condition = CreateEvent( NULL, // no security
3202 TRUE, // manual-reset
3203 FALSE, // non-signaled initially
3205 stream_.apiHandle = (void *) handle;
3208 // Create the ASIO internal buffers. Since RtAudio sets up input
3209 // and output separately, we'll have to dispose of previously
3210 // created output buffers for a duplex stream.
3211 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3212 ASIODisposeBuffers();
3213 if ( handle->bufferInfos ) free( handle->bufferInfos );
3216 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3218 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3219 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3220 if ( handle->bufferInfos == NULL ) {
3221 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3222 errorText_ = errorStream_.str();
// Output channels first (index 0), then input channels (index 1).
3226 ASIOBufferInfo *infos;
3227 infos = handle->bufferInfos;
3228 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3229 infos->isInput = ASIOFalse;
3230 infos->channelNum = i + stream_.channelOffset[0];
3231 infos->buffers[0] = infos->buffers[1] = 0;
3233 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3234 infos->isInput = ASIOTrue;
3235 infos->channelNum = i + stream_.channelOffset[1];
3236 infos->buffers[0] = infos->buffers[1] = 0;
3239 // prepare for callbacks
3240 stream_.sampleRate = sampleRate;
3241 stream_.device[mode] = device;
3242 stream_.mode = isDuplexInput ? DUPLEX : mode;
3244 // store this class instance before registering callbacks, that are going to use it
3245 asioCallbackInfo = &stream_.callbackInfo;
3246 stream_.callbackInfo.object = (void *) this;
3248 // Set up the ASIO callback structure and create the ASIO data buffers.
3249 asioCallbacks.bufferSwitch = &bufferSwitch;
3250 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3251 asioCallbacks.asioMessage = &asioMessages;
3252 asioCallbacks.bufferSwitchTimeInfo = NULL;
3253 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3254 if ( result != ASE_OK ) {
3255 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3256 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3257 // In that case, let's be naïve and try that instead.
3258 *bufferSize = preferSize;
3259 stream_.bufferSize = *bufferSize;
3260 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3263 if ( result != ASE_OK ) {
3264 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3265 errorText_ = errorStream_.str();
3268 buffersAllocated = true;
3269 stream_.state = STREAM_STOPPED;
3271 // Set flags for buffer conversion.
3272 stream_.doConvertBuffer[mode] = false;
3273 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3274 stream_.doConvertBuffer[mode] = true;
3275 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3276 stream_.nUserChannels[mode] > 1 )
3277 stream_.doConvertBuffer[mode] = true;
3279 // Allocate necessary internal buffers
3280 unsigned long bufferBytes;
3281 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3282 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3283 if ( stream_.userBuffer[mode] == NULL ) {
3284 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3288 if ( stream_.doConvertBuffer[mode] ) {
// For duplex input, reuse the output-side device buffer when it is large enough.
3290 bool makeBuffer = true;
3291 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3292 if ( isDuplexInput && stream_.deviceBuffer ) {
3293 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3294 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3298 bufferBytes *= *bufferSize;
3299 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3300 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3301 if ( stream_.deviceBuffer == NULL ) {
3302 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3308 // Determine device latencies
3309 long inputLatency, outputLatency;
3310 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3311 if ( result != ASE_OK ) {
3312 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3313 errorText_ = errorStream_.str();
3314 error( RtAudioError::WARNING); // warn but don't fail
3317 stream_.latency[0] = outputLatency;
3318 stream_.latency[1] = inputLatency;
3321 // Setup the buffer conversion information structure. We don't use
3322 // buffers to do channel offsets, so we override that parameter
3324 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-cleanup path (reached via elided "goto error" jumps above): undo
// everything allocated so far, but only for the non-duplex-input case —
// duplex-input failures are cleaned up by RtApi::openStream().
3329 if ( !isDuplexInput ) {
3330 // the cleanup for error in the duplex input, is done by RtApi::openStream
3331 // So we clean up for single channel only
3333 if ( buffersAllocated )
3334 ASIODisposeBuffers();
3336 drivers.removeCurrentDriver();
3339 CloseHandle( handle->condition );
3340 if ( handle->bufferInfos )
3341 free( handle->bufferInfos );
3344 stream_.apiHandle = 0;
3348 if ( stream_.userBuffer[mode] ) {
3349 free( stream_.userBuffer[mode] );
3350 stream_.userBuffer[mode] = 0;
3353 if ( stream_.deviceBuffer ) {
3354 free( stream_.deviceBuffer );
3355 stream_.deviceBuffer = 0;
3360 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Closes the open stream: stops it if running, disposes the ASIO buffers,
// unloads the driver, and frees the AsioHandle and all conversion buffers.
3362 void RtApiAsio :: closeStream()
3364 if ( stream_.state == STREAM_CLOSED ) {
3365 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3366 error( RtAudioError::WARNING );
3370 if ( stream_.state == STREAM_RUNNING ) {
3371 stream_.state = STREAM_STOPPED;
// Release driver-side resources before unloading the driver.
3374 ASIODisposeBuffers();
3375 drivers.removeCurrentDriver();
3377 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3379 CloseHandle( handle->condition );
3380 if ( handle->bufferInfos )
3381 free( handle->bufferInfos );
3383 stream_.apiHandle = 0;
// Free both user buffers (output = index 0, input = index 1).
3386 for ( int i=0; i<2; i++ ) {
3387 if ( stream_.userBuffer[i] ) {
3388 free( stream_.userBuffer[i] );
3389 stream_.userBuffer[i] = 0;
3393 if ( stream_.deviceBuffer ) {
3394 free( stream_.deviceBuffer );
3395 stream_.deviceBuffer = 0;
3398 stream_.mode = UNINITIALIZED;
3399 stream_.state = STREAM_CLOSED;
// Guards against re-entering stopStream() from the spawned stop thread.
3402 bool stopThreadCalled = false;
// Starts the stream via ASIOStart() and resets the drain bookkeeping used by
// stopStream()/callbackEvent(). Raises SYSTEM_ERROR if the driver fails.
3404 void RtApiAsio :: startStream()
3407 if ( stream_.state == STREAM_RUNNING ) {
3408 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3409 error( RtAudioError::WARNING );
// Record a starting timestamp for stream-time accounting, when available.
3413 #if defined( HAVE_GETTIMEOFDAY )
3414 gettimeofday( &stream_.lastTickTimestamp, NULL );
3417 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3418 ASIOError result = ASIOStart();
3419 if ( result != ASE_OK ) {
3420 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3421 errorText_ = errorStream_.str();
// Reset drain state and the condition event before entering the running state.
3425 handle->drainCounter = 0;
3426 handle->internalDrain = false;
3427 ResetEvent( handle->condition );
3428 stream_.state = STREAM_RUNNING;
3432 stopThreadCalled = false;
3434 if ( result == ASE_OK ) return;
3435 error( RtAudioError::SYSTEM_ERROR );
// Stops the stream. For output/duplex streams it first waits (on the handle's
// condition event) for callbackEvent() to finish draining output, then calls
// ASIOStop(). Raises SYSTEM_ERROR if the driver fails.
3438 void RtApiAsio :: stopStream()
3441 if ( stream_.state == STREAM_STOPPED ) {
3442 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3443 error( RtAudioError::WARNING );
3447 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3448 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3449 if ( handle->drainCounter == 0 ) {
// drainCounter == 2 tells the callback to output silence, then signal us.
3450 handle->drainCounter = 2;
3451 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3455 stream_.state = STREAM_STOPPED;
3457 ASIOError result = ASIOStop();
3458 if ( result != ASE_OK ) {
3459 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3460 errorText_ = errorStream_.str();
3463 if ( result == ASE_OK ) return;
3464 error( RtAudioError::SYSTEM_ERROR );
// Aborts the stream. Deliberately identical to stopStream() (see comment
// below): immediate aborting left residual sound in some device buffers.
3467 void RtApiAsio :: abortStream()
3470 if ( stream_.state == STREAM_STOPPED ) {
3471 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3472 error( RtAudioError::WARNING );
3476 // The following lines were commented-out because some behavior was
3477 // noted where the device buffers need to be zeroed to avoid
3478 // continuing sound, even when the device buffers are completely
3479 // disposed. So now, calling abort is the same as calling stop.
3480 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3481 // handle->drainCounter = 2;
3485 // This function will be called by a spawned thread when the user
3486 // callback function signals that the stream should be stopped or
3487 // aborted. It is necessary to handle it this way because the
3488 // callbackEvent() function must return before the ASIOStop()
3489 // function will return.
3490 static unsigned __stdcall asioStopStream( void *ptr )
3492 CallbackInfo *info = (CallbackInfo *) ptr;
3493 RtApiAsio *object = (RtApiAsio *) info->object;
// Delegate to the normal stop path from this helper thread.
3495 object->stopStream();
// Per-buffer driver callback (invoked via bufferSwitch with the half-buffer
// index). Runs the user callback, converts/byte-swaps between user and device
// buffers, handles output draining on stop, and ticks the stream clock.
3500 bool RtApiAsio :: callbackEvent( long bufferIndex )
3502 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3503 if ( stream_.state == STREAM_CLOSED ) {
3504 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3505 error( RtAudioError::WARNING );
3509 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3510 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3512 // Check if we were draining the stream and signal if finished.
3513 if ( handle->drainCounter > 3 ) {
3515 stream_.state = STREAM_STOPPING;
3516 if ( handle->internalDrain == false )
// External stop (stopStream is blocked on the condition event) — wake it.
3517 SetEvent( handle->condition );
3518 else { // spawn a thread to stop the stream
3520 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3521 &stream_.callbackInfo, 0, &threadId );
3526 // Invoke user callback to get fresh output data UNLESS we are
3528 if ( handle->drainCounter == 0 ) {
3529 RtAudioCallback callback = (RtAudioCallback) info->callback;
3530 double streamTime = getStreamTime();
3531 RtAudioStreamStatus status = 0;
// Report any xrun flagged by asioMessages(), as appropriate for the mode.
3532 if ( stream_.mode != INPUT && asioXRun == true ) {
3533 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3536 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3537 status |= RTAUDIO_INPUT_OVERFLOW;
3540 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3541 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort now; 1 = drain output then stop.
3542 if ( cbReturnValue == 2 ) {
3543 stream_.state = STREAM_STOPPING;
3544 handle->drainCounter = 2;
3546 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3547 &stream_.callbackInfo, 0, &threadId );
3550 else if ( cbReturnValue == 1 ) {
3551 handle->drainCounter = 1;
3552 handle->internalDrain = true;
// ----- Output side: fill the driver's non-interleaved channel buffers. -----
3556 unsigned int nChannels, bufferBytes, i, j;
3557 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3558 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3560 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3562 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3564 for ( i=0, j=0; i<nChannels; i++ ) {
3565 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3566 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3570 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into the device buffer, then scatter
// each channel into its ASIO buffer.
3572 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3573 if ( stream_.doByteSwap[0] )
3574 byteSwapBuffer( stream_.deviceBuffer,
3575 stream_.bufferSize * stream_.nDeviceChannels[0],
3576 stream_.deviceFormat[0] );
3578 for ( i=0, j=0; i<nChannels; i++ ) {
3579 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3580 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3581 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy straight from the user buffer.
3587 if ( stream_.doByteSwap[0] )
3588 byteSwapBuffer( stream_.userBuffer[0],
3589 stream_.bufferSize * stream_.nUserChannels[0],
3590 stream_.userFormat );
3592 for ( i=0, j=0; i<nChannels; i++ ) {
3593 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3594 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3595 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3601 // Don't bother draining input
3602 if ( handle->drainCounter ) {
3603 handle->drainCounter++;
// ----- Input side: gather the driver's channel buffers into user space. -----
3607 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3609 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3611 if (stream_.doConvertBuffer[1]) {
3613 // Always interleave ASIO input data.
3614 for ( i=0, j=0; i<nChannels; i++ ) {
3615 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3616 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3617 handle->bufferInfos[i].buffers[bufferIndex],
3621 if ( stream_.doByteSwap[1] )
3622 byteSwapBuffer( stream_.deviceBuffer,
3623 stream_.bufferSize * stream_.nDeviceChannels[1],
3624 stream_.deviceFormat[1] );
3625 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion: copy each ASIO input channel directly to the user buffer.
3629 for ( i=0, j=0; i<nChannels; i++ ) {
3630 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3631 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3632 handle->bufferInfos[i].buffers[bufferIndex],
3637 if ( stream_.doByteSwap[1] )
3638 byteSwapBuffer( stream_.userBuffer[1],
3639 stream_.bufferSize * stream_.nUserChannels[1],
3640 stream_.userFormat );
3645 // The following call was suggested by Malte Clasen. While the API
3646 // documentation indicates it should not be required, some device
3647 // drivers apparently do not function correctly without it.
3650 RtApi::tickStreamTime();
// Driver callback: the hardware sample rate changed (typically external sync).
// Stops the stream, since RtAudio cannot continue at a different rate.
3654 static void sampleRateChanged( ASIOSampleRate sRate )
3656 // The ASIO documentation says that this usually only happens during
3657 // external sync. Audio processing is not stopped by the driver,
3658 // actual sample rate might not have even changed, maybe only the
3659 // sample rate status of an AES/EBU or S/PDIF digital input at the
3662 RtApi *object = (RtApi *) asioCallbackInfo->object;
3664 object->stopStream();
3666 catch ( RtAudioError &exception ) {
3667 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3671 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// Driver callback: generic ASIO host-message dispatcher. Returns nonzero for
// selectors the host supports/handles (per the ASIO SDK contract).
3674 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3678 switch( selector ) {
3679 case kAsioSelectorSupported:
3680 if ( value == kAsioResetRequest
3681 || value == kAsioEngineVersion
3682 || value == kAsioResyncRequest
3683 || value == kAsioLatenciesChanged
3684 // The following three were added for ASIO 2.0, you don't
3685 // necessarily have to support them.
3686 || value == kAsioSupportsTimeInfo
3687 || value == kAsioSupportsTimeCode
3688 || value == kAsioSupportsInputMonitor)
3691 case kAsioResetRequest:
3692 // Defer the task and perform the reset of the driver during the
3693 // next "safe" situation. You cannot reset the driver right now,
3694 // as this code is called from the driver. Reset the driver is
3695 // done by completely destruct is. I.e. ASIOStop(),
3696 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3698 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3701 case kAsioResyncRequest:
3702 // This informs the application that the driver encountered some
3703 // non-fatal data loss. It is used for synchronization purposes
3704 // of different media. Added mainly to work around the Win16Mutex
3705 // problems in Windows 95/98 with the Windows Multimedia system,
3706 // which could lose data because the Mutex was held too long by
3707 // another thread. However a driver can issue it in other
3709 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3713 case kAsioLatenciesChanged:
3714 // This will inform the host application that the drivers were
3715 // latencies changed. Beware, it this does not mean that the
3716 // buffer sizes have changed! You might need to update internal
3718 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3721 case kAsioEngineVersion:
3722 // Return the supported ASIO version of the host application. If
3723 // a host application does not implement this selector, ASIO 1.0
3724 // is assumed by the driver.
3727 case kAsioSupportsTimeInfo:
3728 // Informs the driver whether the
3729 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3730 // For compatibility with ASIO 1.0 drivers the host application
3731 // should always support the "old" bufferSwitch method, too.
3734 case kAsioSupportsTimeCode:
3735 // Informs the driver whether application is interested in time
3736 // code info. If an application does not need to know about time
3737 // code, the driver has less work to do.
// Maps an ASIOError code to a static human-readable message. Falls back to
// "Unknown error." for codes not in the table; the returned pointer is a
// string literal and never needs freeing.
3744 static const char* getAsioErrorString( ASIOError result )
3752 static const Messages m[] =
3754 { ASE_NotPresent, "Hardware input or output is not present or available." },
3755 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3756 { ASE_InvalidParameter, "Invalid input parameter." },
3757 { ASE_InvalidMode, "Invalid mode." },
3758 { ASE_SPNotAdvancing, "Sample position not advancing." },
3759 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3760 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan is fine: the table is tiny and this is an error path.
3763 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3764 if ( m[i].value == result ) return m[i].message;
3766 return "Unknown error.";
3769 //******************** End of __WINDOWS_ASIO__ *********************//
3773 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3775 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3776 // - Introduces support for the Windows WASAPI API
3777 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3778 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3779 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3786 #include <mferror.h>
3788 #include <mftransform.h>
3789 #include <wmcodecdsp.h>
3791 #include <audioclient.h>
3793 #include <mmdeviceapi.h>
3794 #include <functiondiscoverykeys_devpkey.h>
3796 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3797 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3800 #ifndef MFSTARTUP_NOSOCKET
3801 #define MFSTARTUP_NOSOCKET 0x1
3805 #pragma comment( lib, "ksuser" )
3806 #pragma comment( lib, "mfplat.lib" )
3807 #pragma comment( lib, "mfuuid.lib" )
3808 #pragma comment( lib, "wmcodecdspuuid" )
3811 //=============================================================================
// Release a COM interface pointer and reset it to NULL.
// The NULL guard makes the macro safe to invoke on pointers that were
// never acquired, and resetting to NULL makes a second SAFE_RELEASE on
// the same pointer a harmless no-op (prevents double-Release / dangling
// pointer use). The visible fragment released unconditionally and never
// cleared the pointer.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
3820 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3822 //-----------------------------------------------------------------------------
3824 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3825 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3826 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3827 // provide intermediate storage for read / write synchronization.
3841 // sets the length of the internal ring buffer
3842 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3845 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3847 bufferSize_ = bufferSize;
3852 // attempt to push a buffer into the ring buffer at the current "in" index
3853 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3855 if ( !buffer || // incoming buffer is NULL
3856 bufferSize == 0 || // incoming buffer has no data
3857 bufferSize > bufferSize_ ) // incoming buffer too large
3862 unsigned int relOutIndex = outIndex_;
3863 unsigned int inIndexEnd = inIndex_ + bufferSize;
3864 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3865 relOutIndex += bufferSize_;
3868 // the "IN" index CAN BEGIN at the "OUT" index
3869 // the "IN" index CANNOT END at the "OUT" index
3870 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3871 return false; // not enough space between "in" index and "out" index
3874 // copy buffer from external to internal
3875 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3876 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3877 int fromInSize = bufferSize - fromZeroSize;
3882 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3883 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3885 case RTAUDIO_SINT16:
3886 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3887 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3889 case RTAUDIO_SINT24:
3890 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3891 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3893 case RTAUDIO_SINT32:
3894 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3895 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3897 case RTAUDIO_FLOAT32:
3898 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3899 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3901 case RTAUDIO_FLOAT64:
3902 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3903 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3907 // update "in" index
3908 inIndex_ += bufferSize;
3909 inIndex_ %= bufferSize_;
3914 // attempt to pull a buffer from the ring buffer from the current "out" index
3915 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3917 if ( !buffer || // incoming buffer is NULL
3918 bufferSize == 0 || // incoming buffer has no data
3919 bufferSize > bufferSize_ ) // incoming buffer too large
3924 unsigned int relInIndex = inIndex_;
3925 unsigned int outIndexEnd = outIndex_ + bufferSize;
3926 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3927 relInIndex += bufferSize_;
3930 // the "OUT" index CANNOT BEGIN at the "IN" index
3931 // the "OUT" index CAN END at the "IN" index
3932 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3933 return false; // not enough space between "out" index and "in" index
3936 // copy buffer from internal to external
3937 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3938 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3939 int fromOutSize = bufferSize - fromZeroSize;
3944 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3945 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3947 case RTAUDIO_SINT16:
3948 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3949 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3951 case RTAUDIO_SINT24:
3952 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3953 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3955 case RTAUDIO_SINT32:
3956 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3957 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3959 case RTAUDIO_FLOAT32:
3960 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3961 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3963 case RTAUDIO_FLOAT64:
3964 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3965 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3969 // update "out" index
3970 outIndex_ += bufferSize;
3971 outIndex_ %= bufferSize_;
3978 unsigned int bufferSize_;
3979 unsigned int inIndex_;
3980 unsigned int outIndex_;
3983 //-----------------------------------------------------------------------------
3985 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3986 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3987 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3988 class WasapiResampler
3991 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3992 unsigned int inSampleRate, unsigned int outSampleRate )
3993 : _bytesPerSample( bitsPerSample / 8 )
3994 , _channelCount( channelCount )
3995 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3996 , _transformUnk( NULL )
3997 , _transform( NULL )
3998 , _mediaType( NULL )
3999 , _inputMediaType( NULL )
4000 , _outputMediaType( NULL )
4002 #ifdef __IWMResamplerProps_FWD_DEFINED__
4003 , _resamplerProps( NULL )
4006 // 1. Initialization
4008 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
4010 // 2. Create Resampler Transform Object
4012 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4013 IID_IUnknown, ( void** ) &_transformUnk );
4015 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4017 #ifdef __IWMResamplerProps_FWD_DEFINED__
4018 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4019 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4022 // 3. Specify input / output format
4024 MFCreateMediaType( &_mediaType );
4025 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4026 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4027 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4028 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4029 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4030 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4031 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4032 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4034 MFCreateMediaType( &_inputMediaType );
4035 _mediaType->CopyAllItems( _inputMediaType );
4037 _transform->SetInputType( 0, _inputMediaType, 0 );
4039 MFCreateMediaType( &_outputMediaType );
4040 _mediaType->CopyAllItems( _outputMediaType );
4042 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4043 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4045 _transform->SetOutputType( 0, _outputMediaType, 0 );
4047 // 4. Send stream start messages to Resampler
4049 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4050 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4051 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4056 // 8. Send stream stop messages to Resampler
4058 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4059 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4065 SAFE_RELEASE( _transformUnk );
4066 SAFE_RELEASE( _transform );
4067 SAFE_RELEASE( _mediaType );
4068 SAFE_RELEASE( _inputMediaType );
4069 SAFE_RELEASE( _outputMediaType );
4071 #ifdef __IWMResamplerProps_FWD_DEFINED__
4072 SAFE_RELEASE( _resamplerProps );
4076 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4078 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4079 if ( _sampleRatio == 1 )
4081 // no sample rate conversion required
4082 memcpy( outBuffer, inBuffer, inputBufferSize );
4083 outSampleCount = inSampleCount;
4087 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4089 IMFMediaBuffer* rInBuffer;
4090 IMFSample* rInSample;
4091 BYTE* rInByteBuffer = NULL;
4093 // 5. Create Sample object from input data
4095 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4097 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4098 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4099 rInBuffer->Unlock();
4100 rInByteBuffer = NULL;
4102 rInBuffer->SetCurrentLength( inputBufferSize );
4104 MFCreateSample( &rInSample );
4105 rInSample->AddBuffer( rInBuffer );
4107 // 6. Pass input data to Resampler
4109 _transform->ProcessInput( 0, rInSample, 0 );
4111 SAFE_RELEASE( rInBuffer );
4112 SAFE_RELEASE( rInSample );
4114 // 7. Perform sample rate conversion
4116 IMFMediaBuffer* rOutBuffer = NULL;
4117 BYTE* rOutByteBuffer = NULL;
4119 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4121 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4123 // 7.1 Create Sample object for output data
4125 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4126 MFCreateSample( &( rOutDataBuffer.pSample ) );
4127 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4128 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4129 rOutDataBuffer.dwStreamID = 0;
4130 rOutDataBuffer.dwStatus = 0;
4131 rOutDataBuffer.pEvents = NULL;
4133 // 7.2 Get output data from Resampler
4135 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4138 SAFE_RELEASE( rOutBuffer );
4139 SAFE_RELEASE( rOutDataBuffer.pSample );
4143 // 7.3 Write output data to outBuffer
4145 SAFE_RELEASE( rOutBuffer );
4146 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4147 rOutBuffer->GetCurrentLength( &rBytes );
4149 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4150 memcpy( outBuffer, rOutByteBuffer, rBytes );
4151 rOutBuffer->Unlock();
4152 rOutByteBuffer = NULL;
4154 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4155 SAFE_RELEASE( rOutBuffer );
4156 SAFE_RELEASE( rOutDataBuffer.pSample );
4160 unsigned int _bytesPerSample;
4161 unsigned int _channelCount;
4164 IUnknown* _transformUnk;
4165 IMFTransform* _transform;
4166 IMFMediaType* _mediaType;
4167 IMFMediaType* _inputMediaType;
4168 IMFMediaType* _outputMediaType;
4170 #ifdef __IWMResamplerProps_FWD_DEFINED__
4171 IWMResamplerProps* _resamplerProps;
4175 //-----------------------------------------------------------------------------
4177 // A structure to hold various information related to the WASAPI implementation.
4180 IAudioClient* captureAudioClient;
4181 IAudioClient* renderAudioClient;
4182 IAudioCaptureClient* captureClient;
4183 IAudioRenderClient* renderClient;
4184 HANDLE captureEvent;
4188 : captureAudioClient( NULL ),
4189 renderAudioClient( NULL ),
4190 captureClient( NULL ),
4191 renderClient( NULL ),
4192 captureEvent( NULL ),
4193 renderEvent( NULL ) {}
4196 //=============================================================================
4198 RtApiWasapi::RtApiWasapi()
4199 : coInitialized_( false ), deviceEnumerator_( NULL )
4201 // WASAPI can run either apartment or multi-threaded
4202 HRESULT hr = CoInitialize( NULL );
4203 if ( !FAILED( hr ) )
4204 coInitialized_ = true;
4206 // Instantiate device enumerator
4207 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4208 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4209 ( void** ) &deviceEnumerator_ );
4211 // If this runs on an old Windows, it will fail. Ignore and proceed.
4213 deviceEnumerator_ = NULL;
4216 //-----------------------------------------------------------------------------
4218 RtApiWasapi::~RtApiWasapi()
4220 if ( stream_.state != STREAM_CLOSED )
4223 SAFE_RELEASE( deviceEnumerator_ );
4225 // If this object previously called CoInitialize()
4226 if ( coInitialized_ )
4230 //=============================================================================
4232 unsigned int RtApiWasapi::getDeviceCount( void )
4234 unsigned int captureDeviceCount = 0;
4235 unsigned int renderDeviceCount = 0;
4237 IMMDeviceCollection* captureDevices = NULL;
4238 IMMDeviceCollection* renderDevices = NULL;
4240 if ( !deviceEnumerator_ )
4243 // Count capture devices
4245 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4246 if ( FAILED( hr ) ) {
4247 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4251 hr = captureDevices->GetCount( &captureDeviceCount );
4252 if ( FAILED( hr ) ) {
4253 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4257 // Count render devices
4258 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4259 if ( FAILED( hr ) ) {
4260 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4264 hr = renderDevices->GetCount( &renderDeviceCount );
4265 if ( FAILED( hr ) ) {
4266 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4271 // release all references
4272 SAFE_RELEASE( captureDevices );
4273 SAFE_RELEASE( renderDevices );
4275 if ( errorText_.empty() )
4276 return captureDeviceCount + renderDeviceCount;
4278 error( RtAudioError::DRIVER_ERROR );
4282 //-----------------------------------------------------------------------------
4284 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4286 RtAudio::DeviceInfo info;
4287 unsigned int captureDeviceCount = 0;
4288 unsigned int renderDeviceCount = 0;
4289 std::string defaultDeviceName;
4290 bool isCaptureDevice = false;
4292 PROPVARIANT deviceNameProp;
4293 PROPVARIANT defaultDeviceNameProp;
4295 IMMDeviceCollection* captureDevices = NULL;
4296 IMMDeviceCollection* renderDevices = NULL;
4297 IMMDevice* devicePtr = NULL;
4298 IMMDevice* defaultDevicePtr = NULL;
4299 IAudioClient* audioClient = NULL;
4300 IPropertyStore* devicePropStore = NULL;
4301 IPropertyStore* defaultDevicePropStore = NULL;
4303 WAVEFORMATEX* deviceFormat = NULL;
4304 WAVEFORMATEX* closestMatchFormat = NULL;
4307 info.probed = false;
4309 // Count capture devices
4311 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4312 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4313 if ( FAILED( hr ) ) {
4314 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4318 hr = captureDevices->GetCount( &captureDeviceCount );
4319 if ( FAILED( hr ) ) {
4320 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4324 // Count render devices
4325 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4326 if ( FAILED( hr ) ) {
4327 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4331 hr = renderDevices->GetCount( &renderDeviceCount );
4332 if ( FAILED( hr ) ) {
4333 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4337 // validate device index
4338 if ( device >= captureDeviceCount + renderDeviceCount ) {
4339 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4340 errorType = RtAudioError::INVALID_USE;
4344 // determine whether index falls within capture or render devices
4345 if ( device >= renderDeviceCount ) {
4346 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4347 if ( FAILED( hr ) ) {
4348 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4351 isCaptureDevice = true;
4354 hr = renderDevices->Item( device, &devicePtr );
4355 if ( FAILED( hr ) ) {
4356 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4359 isCaptureDevice = false;
4362 // get default device name
4363 if ( isCaptureDevice ) {
4364 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4365 if ( FAILED( hr ) ) {
4366 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4371 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4372 if ( FAILED( hr ) ) {
4373 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4378 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4379 if ( FAILED( hr ) ) {
4380 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4383 PropVariantInit( &defaultDeviceNameProp );
4385 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4386 if ( FAILED( hr ) ) {
4387 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4391 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4394 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4395 if ( FAILED( hr ) ) {
4396 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4400 PropVariantInit( &deviceNameProp );
4402 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4403 if ( FAILED( hr ) ) {
4404 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4408 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4411 if ( isCaptureDevice ) {
4412 info.isDefaultInput = info.name == defaultDeviceName;
4413 info.isDefaultOutput = false;
4416 info.isDefaultInput = false;
4417 info.isDefaultOutput = info.name == defaultDeviceName;
4421 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4422 if ( FAILED( hr ) ) {
4423 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4427 hr = audioClient->GetMixFormat( &deviceFormat );
4428 if ( FAILED( hr ) ) {
4429 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4433 if ( isCaptureDevice ) {
4434 info.inputChannels = deviceFormat->nChannels;
4435 info.outputChannels = 0;
4436 info.duplexChannels = 0;
4439 info.inputChannels = 0;
4440 info.outputChannels = deviceFormat->nChannels;
4441 info.duplexChannels = 0;
4445 info.sampleRates.clear();
4447 // allow support for all sample rates as we have a built-in sample rate converter
4448 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4449 info.sampleRates.push_back( SAMPLE_RATES[i] );
4451 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4454 info.nativeFormats = 0;
4456 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4457 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4458 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4460 if ( deviceFormat->wBitsPerSample == 32 ) {
4461 info.nativeFormats |= RTAUDIO_FLOAT32;
4463 else if ( deviceFormat->wBitsPerSample == 64 ) {
4464 info.nativeFormats |= RTAUDIO_FLOAT64;
4467 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4468 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4469 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4471 if ( deviceFormat->wBitsPerSample == 8 ) {
4472 info.nativeFormats |= RTAUDIO_SINT8;
4474 else if ( deviceFormat->wBitsPerSample == 16 ) {
4475 info.nativeFormats |= RTAUDIO_SINT16;
4477 else if ( deviceFormat->wBitsPerSample == 24 ) {
4478 info.nativeFormats |= RTAUDIO_SINT24;
4480 else if ( deviceFormat->wBitsPerSample == 32 ) {
4481 info.nativeFormats |= RTAUDIO_SINT32;
4489 // release all references
4490 PropVariantClear( &deviceNameProp );
4491 PropVariantClear( &defaultDeviceNameProp );
4493 SAFE_RELEASE( captureDevices );
4494 SAFE_RELEASE( renderDevices );
4495 SAFE_RELEASE( devicePtr );
4496 SAFE_RELEASE( defaultDevicePtr );
4497 SAFE_RELEASE( audioClient );
4498 SAFE_RELEASE( devicePropStore );
4499 SAFE_RELEASE( defaultDevicePropStore );
4501 CoTaskMemFree( deviceFormat );
4502 CoTaskMemFree( closestMatchFormat );
4504 if ( !errorText_.empty() )
4509 //-----------------------------------------------------------------------------
4511 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4513 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4514 if ( getDeviceInfo( i ).isDefaultOutput ) {
4522 //-----------------------------------------------------------------------------
4524 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4526 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4527 if ( getDeviceInfo( i ).isDefaultInput ) {
4535 //-----------------------------------------------------------------------------
4537 void RtApiWasapi::closeStream( void )
4539 if ( stream_.state == STREAM_CLOSED ) {
4540 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4541 error( RtAudioError::WARNING );
4545 if ( stream_.state != STREAM_STOPPED )
4548 // clean up stream memory
4549 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4550 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4552 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4553 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4555 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4556 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4558 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4559 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4561 delete ( WasapiHandle* ) stream_.apiHandle;
4562 stream_.apiHandle = NULL;
4564 for ( int i = 0; i < 2; i++ ) {
4565 if ( stream_.userBuffer[i] ) {
4566 free( stream_.userBuffer[i] );
4567 stream_.userBuffer[i] = 0;
4571 if ( stream_.deviceBuffer ) {
4572 free( stream_.deviceBuffer );
4573 stream_.deviceBuffer = 0;
4576 // update stream state
4577 stream_.state = STREAM_CLOSED;
4580 //-----------------------------------------------------------------------------
4582 void RtApiWasapi::startStream( void )
4586 if ( stream_.state == STREAM_RUNNING ) {
4587 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4588 error( RtAudioError::WARNING );
4592 #if defined( HAVE_GETTIMEOFDAY )
4593 gettimeofday( &stream_.lastTickTimestamp, NULL );
4596 // update stream state
4597 stream_.state = STREAM_RUNNING;
4599 // create WASAPI stream thread
4600 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4602 if ( !stream_.callbackInfo.thread ) {
4603 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4604 error( RtAudioError::THREAD_ERROR );
4607 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4608 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4612 //-----------------------------------------------------------------------------
4614 void RtApiWasapi::stopStream( void )
4618 if ( stream_.state == STREAM_STOPPED ) {
4619 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4620 error( RtAudioError::WARNING );
4624 // inform stream thread by setting stream state to STREAM_STOPPING
4625 stream_.state = STREAM_STOPPING;
4627 // wait until stream thread is stopped
4628 while( stream_.state != STREAM_STOPPED ) {
4632 // Wait for the last buffer to play before stopping.
4633 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4635 // close thread handle
4636 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4637 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4638 error( RtAudioError::THREAD_ERROR );
4642 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4645 //-----------------------------------------------------------------------------
4647 void RtApiWasapi::abortStream( void )
4651 if ( stream_.state == STREAM_STOPPED ) {
4652 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4653 error( RtAudioError::WARNING );
4657 // inform stream thread by setting stream state to STREAM_STOPPING
4658 stream_.state = STREAM_STOPPING;
4660 // wait until stream thread is stopped
4661 while ( stream_.state != STREAM_STOPPED ) {
4665 // close thread handle
4666 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4667 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4668 error( RtAudioError::THREAD_ERROR );
4672 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4675 //-----------------------------------------------------------------------------
4677 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4678 unsigned int firstChannel, unsigned int sampleRate,
4679 RtAudioFormat format, unsigned int* bufferSize,
4680 RtAudio::StreamOptions* options )
4682 bool methodResult = FAILURE;
4683 unsigned int captureDeviceCount = 0;
4684 unsigned int renderDeviceCount = 0;
4686 IMMDeviceCollection* captureDevices = NULL;
4687 IMMDeviceCollection* renderDevices = NULL;
4688 IMMDevice* devicePtr = NULL;
4689 WAVEFORMATEX* deviceFormat = NULL;
4690 unsigned int bufferBytes;
4691 stream_.state = STREAM_STOPPED;
4693 // create API Handle if not already created
4694 if ( !stream_.apiHandle )
4695 stream_.apiHandle = ( void* ) new WasapiHandle();
4697 // Count capture devices
4699 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4700 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4701 if ( FAILED( hr ) ) {
4702 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4706 hr = captureDevices->GetCount( &captureDeviceCount );
4707 if ( FAILED( hr ) ) {
4708 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4712 // Count render devices
4713 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4714 if ( FAILED( hr ) ) {
4715 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4719 hr = renderDevices->GetCount( &renderDeviceCount );
4720 if ( FAILED( hr ) ) {
4721 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4725 // validate device index
4726 if ( device >= captureDeviceCount + renderDeviceCount ) {
4727 errorType = RtAudioError::INVALID_USE;
4728 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4732 // if device index falls within capture devices
4733 if ( device >= renderDeviceCount ) {
4734 if ( mode != INPUT ) {
4735 errorType = RtAudioError::INVALID_USE;
4736 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4740 // retrieve captureAudioClient from devicePtr
4741 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4743 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4744 if ( FAILED( hr ) ) {
4745 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4749 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4750 NULL, ( void** ) &captureAudioClient );
4751 if ( FAILED( hr ) ) {
4752 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4756 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4757 if ( FAILED( hr ) ) {
4758 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4762 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4763 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4766 // if device index falls within render devices and is configured for loopback
4767 if ( device < renderDeviceCount && mode == INPUT )
4769 // if renderAudioClient is not initialised, initialise it now
4770 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4771 if ( !renderAudioClient )
4773 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4776 // retrieve captureAudioClient from devicePtr
4777 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4779 hr = renderDevices->Item( device, &devicePtr );
4780 if ( FAILED( hr ) ) {
4781 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4785 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4786 NULL, ( void** ) &captureAudioClient );
4787 if ( FAILED( hr ) ) {
4788 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4792 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4793 if ( FAILED( hr ) ) {
4794 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4798 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4799 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4802 // if device index falls within render devices and is configured for output
4803 if ( device < renderDeviceCount && mode == OUTPUT )
4805 // if renderAudioClient is already initialised, don't initialise it again
4806 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4807 if ( renderAudioClient )
4809 methodResult = SUCCESS;
4813 hr = renderDevices->Item( device, &devicePtr );
4814 if ( FAILED( hr ) ) {
4815 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4819 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4820 NULL, ( void** ) &renderAudioClient );
4821 if ( FAILED( hr ) ) {
4822 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4826 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4827 if ( FAILED( hr ) ) {
4828 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4832 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4833 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4837 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4838 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4839 stream_.mode = DUPLEX;
4842 stream_.mode = mode;
4845 stream_.device[mode] = device;
4846 stream_.doByteSwap[mode] = false;
4847 stream_.sampleRate = sampleRate;
4848 stream_.bufferSize = *bufferSize;
4849 stream_.nBuffers = 1;
4850 stream_.nUserChannels[mode] = channels;
4851 stream_.channelOffset[mode] = firstChannel;
4852 stream_.userFormat = format;
4853 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4855 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4856 stream_.userInterleaved = false;
4858 stream_.userInterleaved = true;
4859 stream_.deviceInterleaved[mode] = true;
4861 // Set flags for buffer conversion.
4862 stream_.doConvertBuffer[mode] = false;
4863 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4864 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4865 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4866 stream_.doConvertBuffer[mode] = true;
4867 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4868 stream_.nUserChannels[mode] > 1 )
4869 stream_.doConvertBuffer[mode] = true;
4871 if ( stream_.doConvertBuffer[mode] )
4872 setConvertInfo( mode, firstChannel );
4874 // Allocate necessary internal buffers
4875 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4877 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4878 if ( !stream_.userBuffer[mode] ) {
4879 errorType = RtAudioError::MEMORY_ERROR;
4880 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4884 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4885 stream_.callbackInfo.priority = 15;
4887 stream_.callbackInfo.priority = 0;
4889 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4890 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4892 methodResult = SUCCESS;
4896 SAFE_RELEASE( captureDevices );
4897 SAFE_RELEASE( renderDevices );
4898 SAFE_RELEASE( devicePtr );
4899 CoTaskMemFree( deviceFormat );
4901 // if method failed, close the stream
4902 if ( methodResult == FAILURE )
4905 if ( !errorText_.empty() )
4907 return methodResult;
4910 //=============================================================================
//-----------------------------------------------------------------------------
// Static thread entry point handed to CreateThread: unpacks the RtApiWasapi
// instance pointer and runs its wasapiThread() processing loop.
4912 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4915 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
//-----------------------------------------------------------------------------
// Helper thread entry point: calls stopStream() on the given RtApiWasapi
// instance, so the audio thread can request a stop without blocking itself.
4920 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4923 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
//-----------------------------------------------------------------------------
// Helper thread entry point: calls abortStream() on the given RtApiWasapi
// instance, so the audio thread can request an immediate abort without
// aborting itself from within.
4928 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4931 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4936 //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Core WASAPI stream-servicing thread.  Lazily initializes the capture and/or
// render IAudioClients in shared, event-driven mode, sizes the intermediate
// ring buffers and resamplers, then loops until stream_.state becomes
// STREAM_STOPPING: pull captured frames -> run the user callback ->
// resample/convert -> push rendered frames.
// NOTE(review): this listing appears to have error-path lines (braces and
// jumps to a cleanup label) elided; failed HRESULT branches presumably reach
// the cleanup code near the bottom (CoTaskMemFree/free/delete) — confirm
// against the full file.
4938 void RtApiWasapi::wasapiThread()
4940 // as this is a new thread, we must CoInitialize it
4941 CoInitialize( NULL );
// Cache the per-stream WASAPI handles stashed in stream_.apiHandle.
4945 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4946 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4947 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4948 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4949 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4950 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4952 WAVEFORMATEX* captureFormat = NULL;
4953 WAVEFORMATEX* renderFormat = NULL;
// Ratios of device mix rate to the user-requested stream rate; drive the
// resamplers and the sizing of the intermediate buffers below.
4954 float captureSrRatio = 0.0f;
4955 float renderSrRatio = 0.0f;
4956 WasapiBuffer captureBuffer;
4957 WasapiBuffer renderBuffer;
4958 WasapiResampler* captureResampler = NULL;
4959 WasapiResampler* renderResampler = NULL;
4961 // declare local stream variables
4962 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4963 BYTE* streamBuffer = NULL;
4964 DWORD captureFlags = 0;
4965 unsigned int bufferFrameCount = 0;
4966 unsigned int numFramesPadding = 0;
4967 unsigned int convBufferSize = 0;
// Loopback (capturing a render device) is indicated by identical input and
// output device indices.
4968 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4969 bool callbackPushed = true;
4970 bool callbackPulled = false;
4971 bool callbackStopped = false;
4972 int callbackResult = 0;
4974 // convBuffer is used to store converted buffers between WASAPI and the user
4975 char* convBuffer = NULL;
4976 unsigned int convBuffSize = 0;
4977 unsigned int deviceBuffSize = 0;
4979 std::string errorText;
4980 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4982 // Attempt to assign "Pro Audio" characteristic to thread
// NOTE(review): a null-check of AvrtDll likely sits in elided lines between
// LoadLibrary and GetProcAddress — verify against the full file.
4983 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4985 DWORD taskIndex = 0;
4986 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4987 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4988 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4989 FreeLibrary( AvrtDll );
4992 // start capture stream if applicable
4993 if ( captureAudioClient ) {
4994 hr = captureAudioClient->GetMixFormat( &captureFormat );
4995 if ( FAILED( hr ) ) {
4996 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5000 // init captureResampler
5001 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5002 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5003 captureFormat->nSamplesPerSec, stream_.sampleRate );
5005 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// First entry: initialize the capture client in shared mode.  In loopback
// mode WASAPI does not support event callbacks on the capture side, so the
// loopback flag replaces the event-callback flag.
5007 if ( !captureClient ) {
5008 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5009 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5014 if ( FAILED( hr ) ) {
5015 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5019 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5020 ( void** ) &captureClient );
5021 if ( FAILED( hr ) ) {
5022 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5026 // don't configure captureEvent if in loopback mode
5027 if ( !loopbackEnabled )
5029 // configure captureEvent to trigger on every available capture buffer
5030 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5031 if ( !captureEvent ) {
5032 errorType = RtAudioError::SYSTEM_ERROR;
5033 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5037 hr = captureAudioClient->SetEventHandle( captureEvent );
5038 if ( FAILED( hr ) ) {
5039 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the newly-created handles so a subsequent start can reuse them.
5043 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5046 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5048 // reset the capture stream
5049 hr = captureAudioClient->Reset();
5050 if ( FAILED( hr ) ) {
5051 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5055 // start the capture stream
5056 hr = captureAudioClient->Start();
5057 if ( FAILED( hr ) ) {
5058 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5063 unsigned int inBufferSize = 0;
5064 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5065 if ( FAILED( hr ) ) {
5066 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5070 // scale outBufferSize according to stream->user sample rate ratio
5071 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5072 inBufferSize *= stream_.nDeviceChannels[INPUT];
5074 // set captureBuffer size
5075 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5078 // start render stream if applicable
5079 if ( renderAudioClient ) {
5080 hr = renderAudioClient->GetMixFormat( &renderFormat );
5081 if ( FAILED( hr ) ) {
5082 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5086 // init renderResampler
5087 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5088 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5089 stream_.sampleRate, renderFormat->nSamplesPerSec );
5091 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
// First entry: initialize the render client in shared, event-driven mode.
5093 if ( !renderClient ) {
5094 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5095 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5100 if ( FAILED( hr ) ) {
5101 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5105 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5106 ( void** ) &renderClient );
5107 if ( FAILED( hr ) ) {
5108 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5112 // configure renderEvent to trigger on every available render buffer
5113 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5114 if ( !renderEvent ) {
5115 errorType = RtAudioError::SYSTEM_ERROR;
5116 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5120 hr = renderAudioClient->SetEventHandle( renderEvent );
5121 if ( FAILED( hr ) ) {
5122 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
// Persist the newly-created handles so a subsequent start can reuse them.
5126 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5127 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5129 // reset the render stream
5130 hr = renderAudioClient->Reset();
5131 if ( FAILED( hr ) ) {
5132 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5136 // start the render stream
5137 hr = renderAudioClient->Start();
5138 if ( FAILED( hr ) ) {
5139 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5144 unsigned int outBufferSize = 0;
5145 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5146 if ( FAILED( hr ) ) {
5147 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5151 // scale inBufferSize according to user->stream sample rate ratio
5152 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5153 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5155 // set renderBuffer size
5156 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5159 // malloc buffer memory
// Buffer sizes depend on the stream mode; DUPLEX uses the larger of the
// input and output requirements so one buffer serves both directions.
5160 if ( stream_.mode == INPUT )
5162 using namespace std; // for ceilf
5163 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5164 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5166 else if ( stream_.mode == OUTPUT )
5168 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5169 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5171 else if ( stream_.mode == DUPLEX )
5173 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5174 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5175 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5176 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5179 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5180 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5181 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5182 if ( !convBuffer || !stream_.deviceBuffer ) {
5183 errorType = RtAudioError::MEMORY_ERROR;
5184 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5188 // stream process loop
5189 while ( stream_.state != STREAM_STOPPING ) {
5190 if ( !callbackPulled ) {
5193 // 1. Pull callback buffer from inputBuffer
5194 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5195 // Convert callback buffer to user format
5197 if ( captureAudioClient )
5199 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5200 if ( captureSrRatio != 1 )
5202 // account for remainders
// Accumulate resampled frames until a full user buffer is available.
5207 while ( convBufferSize < stream_.bufferSize )
5209 // Pull callback buffer from inputBuffer
5210 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5211 samplesToPull * stream_.nDeviceChannels[INPUT],
5212 stream_.deviceFormat[INPUT] );
5214 if ( !callbackPulled )
5219 // Convert callback buffer to user sample rate
5220 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5221 unsigned int convSamples = 0;
5223 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5228 convBufferSize += convSamples;
5229 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5232 if ( callbackPulled )
5234 if ( stream_.doConvertBuffer[INPUT] ) {
5235 // Convert callback buffer to user format
5236 convertBuffer( stream_.userBuffer[INPUT],
5237 stream_.deviceBuffer,
5238 stream_.convertInfo[INPUT] );
5241 // no further conversion, simple copy deviceBuffer to userBuffer
5242 memcpy( stream_.userBuffer[INPUT],
5243 stream_.deviceBuffer,
5244 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5249 // if there is no capture stream, set callbackPulled flag
5250 callbackPulled = true;
5255 // 1. Execute user callback method
5256 // 2. Handle return value from callback
5258 // if callback has not requested the stream to stop
5259 if ( callbackPulled && !callbackStopped ) {
5260 // Execute user callback method
5261 callbackResult = callback( stream_.userBuffer[OUTPUT],
5262 stream_.userBuffer[INPUT],
5265 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5266 stream_.callbackInfo.userData );
5269 RtApi::tickStreamTime();
5271 // Handle return value from callback
// A return of 1 requests a drain-and-stop; 2 requests an immediate abort.
// Both are serviced on a separate thread because stopStream()/abortStream()
// join this thread and so cannot run on it.
5272 if ( callbackResult == 1 ) {
5273 // instantiate a thread to stop this thread
5274 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5275 if ( !threadHandle ) {
5276 errorType = RtAudioError::THREAD_ERROR;
5277 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5280 else if ( !CloseHandle( threadHandle ) ) {
5281 errorType = RtAudioError::THREAD_ERROR;
5282 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5286 callbackStopped = true;
5288 else if ( callbackResult == 2 ) {
5289 // instantiate a thread to stop this thread
5290 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5291 if ( !threadHandle ) {
5292 errorType = RtAudioError::THREAD_ERROR;
5293 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5296 else if ( !CloseHandle( threadHandle ) ) {
5297 errorType = RtAudioError::THREAD_ERROR;
5298 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5302 callbackStopped = true;
5309 // 1. Convert callback buffer to stream format
5310 // 2. Convert callback buffer to stream sample rate and channel count
5311 // 3. Push callback buffer into outputBuffer
5313 if ( renderAudioClient && callbackPulled )
5315 // if the last call to renderBuffer.PushBuffer() was successful
5316 if ( callbackPushed || convBufferSize == 0 )
5318 if ( stream_.doConvertBuffer[OUTPUT] )
5320 // Convert callback buffer to stream format
5321 convertBuffer( stream_.deviceBuffer,
5322 stream_.userBuffer[OUTPUT],
5323 stream_.convertInfo[OUTPUT] );
5327 // no further conversion, simple copy userBuffer to deviceBuffer
5328 memcpy( stream_.deviceBuffer,
5329 stream_.userBuffer[OUTPUT],
5330 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5333 // Convert callback buffer to stream sample rate
5334 renderResampler->Convert( convBuffer,
5335 stream_.deviceBuffer,
5340 // Push callback buffer into outputBuffer
5341 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5342 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5343 stream_.deviceFormat[OUTPUT] );
5346 // if there is no render stream, set callbackPushed flag
5347 callbackPushed = true;
5352 // 1. Get capture buffer from stream
5353 // 2. Push capture buffer into inputBuffer
5354 // 3. If 2. was successful: Release capture buffer
5356 if ( captureAudioClient ) {
5357 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5358 if ( !callbackPulled ) {
5359 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5362 // Get capture buffer from stream
5363 hr = captureClient->GetBuffer( &streamBuffer,
5365 &captureFlags, NULL, NULL );
5366 if ( FAILED( hr ) ) {
5367 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5371 if ( bufferFrameCount != 0 ) {
5372 // Push capture buffer into inputBuffer
5373 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5374 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5375 stream_.deviceFormat[INPUT] ) )
5377 // Release capture buffer
5378 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5379 if ( FAILED( hr ) ) {
5380 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5386 // Inform WASAPI that capture was unsuccessful
5387 hr = captureClient->ReleaseBuffer( 0 );
5388 if ( FAILED( hr ) ) {
5389 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5396 // Inform WASAPI that capture was unsuccessful
5397 hr = captureClient->ReleaseBuffer( 0 );
5398 if ( FAILED( hr ) ) {
5399 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5407 // 1. Get render buffer from stream
5408 // 2. Pull next buffer from outputBuffer
5409 // 3. If 2. was successful: Fill render buffer with next buffer
5410 // Release render buffer
5412 if ( renderAudioClient ) {
5413 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5414 if ( callbackPulled && !callbackPushed ) {
5415 WaitForSingleObject( renderEvent, INFINITE );
5418 // Get render buffer from stream
5419 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5420 if ( FAILED( hr ) ) {
5421 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5425 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5426 if ( FAILED( hr ) ) {
5427 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total buffer minus frames still queued for playback.
5431 bufferFrameCount -= numFramesPadding;
5433 if ( bufferFrameCount != 0 ) {
5434 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5435 if ( FAILED( hr ) ) {
5436 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5440 // Pull next buffer from outputBuffer
5441 // Fill render buffer with next buffer
5442 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5443 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5444 stream_.deviceFormat[OUTPUT] ) )
5446 // Release render buffer
5447 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5448 if ( FAILED( hr ) ) {
5449 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5455 // Inform WASAPI that render was unsuccessful
5456 hr = renderClient->ReleaseBuffer( 0, 0 );
5457 if ( FAILED( hr ) ) {
5458 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5465 // Inform WASAPI that render was unsuccessful
5466 hr = renderClient->ReleaseBuffer( 0, 0 );
5467 if ( FAILED( hr ) ) {
5468 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5474 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5475 if ( callbackPushed ) {
5476 // unsetting the callbackPulled flag lets the stream know that
5477 // the audio device is ready for another callback output buffer.
5478 callbackPulled = false;
// Cleanup: release the COM-allocated mix formats and the locally-allocated
// conversion buffer and resamplers.
// NOTE(review): an 'Exit:' cleanup label likely precedes these lines in the
// full file — the error branches above appear to jump here.
5485 CoTaskMemFree( captureFormat );
5486 CoTaskMemFree( renderFormat );
5488 free ( convBuffer );
5489 delete renderResampler;
5490 delete captureResampler;
5494 // update stream state
5495 stream_.state = STREAM_STOPPED;
5497 if ( !errorText.empty() )
5499 errorText_ = errorText;
5504 //******************** End of __WINDOWS_WASAPI__ *********************//
5508 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5510 // Modified by Robin Davies, October 2005
5511 // - Improvements to DirectX pointer chasing.
5512 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5513 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5514 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5515 // Changed device query structure for RtAudio 4.0.7, January 2010
5517 #include <windows.h>
5518 #include <process.h>
5519 #include <mmsystem.h>
5523 #include <algorithm>
5525 #if defined(__MINGW32__)
5526 // missing from latest mingw winapi
5527 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5528 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5529 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5530 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5533 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5535 #ifdef _MSC_VER // if Microsoft Visual C++
5536 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5539 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5541 if ( pointer > bufferSize ) pointer -= bufferSize;
5542 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5543 if ( pointer < earlierPointer ) pointer += bufferSize;
5544 return pointer >= earlierPointer && pointer < laterPointer;
5547 // A structure to hold various information related to the DirectSound
5548 // API implementation.
// NOTE(review): the struct header and several member declarations (the id,
// buffer and xrun arrays referenced by the constructor initializer below)
// are in lines elided from this excerpt — confirm against the full file.
// Index convention for the [2] arrays appears to be one slot per direction
// (playback/capture), matching the initializer's paired assignments.
5550 unsigned int drainCounter; // Tracks callback counts when draining
5551 bool internalDrain; // Indicates if stop is initiated from callback or not.
5555 UINT bufferPointer[2];
5556 DWORD dsBufferSize[2];
5557 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero all counters, pointers and xrun flags.
5561 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5564 // Declarations for utility functions, callbacks, and structures
5565 // specific to the DirectSound implementation.
// deviceQueryCallback: enumeration callback passed to DirectSoundEnumerate /
// DirectSoundCaptureEnumerate below; records each reported device.
// NOTE(review): its remaining parameters are in lines elided from this excerpt.
5566 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5567 LPCTSTR description,
// Maps a DirectSound HRESULT-style code to a readable message.
5571 static const char* getErrorString( int code );
// Entry point for the DirectSound callback-processing thread.
5573 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice default constructor fragment: not found, no valid ids yet.
// NOTE(review): the enclosing DsDevice struct header is elided here.
5582 : found(false) { validId[0] = false; validId[1] = false; }
// Context handed to deviceQueryCallback: which direction is being probed and
// where to record the results.
5585 struct DsProbeData {
5587 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread.  coInitialized_ records
// whether this object owns a balancing CoUninitialize() call in the
// destructor.
5590 RtApiDs :: RtApiDs()
5592 // Dsound will run both-threaded. If CoInitialize fails, then just
5593 // accept whatever the mainline chose for a threading model.
5594 coInitialized_ = false;
5595 HRESULT hr = CoInitialize( NULL );
5596 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any open stream, then balance the constructor's
// CoInitialize() if it succeeded.
5599 RtApiDs :: ~RtApiDs()
5601 if ( stream_.state != STREAM_CLOSED ) closeStream();
5602 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5605 // The DirectSound default output is always the first device.
5606 unsigned int RtApiDs :: getDefaultOutputDevice( void )
// NOTE(review): body elided from this excerpt — per the comment above it
// presumably returns device index 0; confirm against the full file.
5611 // The DirectSound default input is always the first input device,
5612 // which is the first capture device enumerated.
5613 unsigned int RtApiDs :: getDefaultInputDevice( void )
// NOTE(review): body elided from this excerpt — per the comment above it
// presumably returns device index 0; confirm against the full file.
// Re-enumerate the DirectSound render and capture devices, merging results
// into the cached dsDevices list, dropping entries that have disappeared,
// and returning the resulting device count.  Enumeration failures are
// reported as warnings, not fatal errors.
5618 unsigned int RtApiDs :: getDeviceCount( void )
5620 // Set query flag for previously found devices to false, so that we
5621 // can check for any devices that have disappeared.
5622 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5623 dsDevices[i].found = false;
5625 // Query DirectSound devices.
5626 struct DsProbeData probeInfo;
5627 probeInfo.isInput = false;
5628 probeInfo.dsDevices = &dsDevices;
5629 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5630 if ( FAILED( result ) ) {
5631 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5632 errorText_ = errorStream_.str();
5633 error( RtAudioError::WARNING );
5636 // Query DirectSoundCapture devices.
5637 probeInfo.isInput = true;
5638 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5639 if ( FAILED( result ) ) {
5640 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5641 errorText_ = errorStream_.str();
5642 error( RtAudioError::WARNING );
5645 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// The loop only advances past entries it keeps; after an erase the same
// index is re-examined.
// NOTE(review): the increment branch of this loop is in an elided line.
5646 for ( unsigned int i=0; i<dsDevices.size(); ) {
5647 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5651 return static_cast<unsigned int>(dsDevices.size());
// Probe a single DirectSound device and fill an RtAudio::DeviceInfo with its
// output/input channel counts, supported sample rates, native formats and
// default-device flags.  Probing failures are reported as warnings and
// return the partially-filled info (info.probed left false on early exits).
// NOTE(review): several lines (goto targets, COM Release() calls, closing
// braces) are elided from this excerpt — confirm control flow against the
// full file.
5654 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5656 RtAudio::DeviceInfo info;
5657 info.probed = false;
5659 if ( dsDevices.size() == 0 ) {
5660 // Force a query of all devices
5662 if ( dsDevices.size() == 0 ) {
5663 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5664 error( RtAudioError::INVALID_USE );
5669 if ( device >= dsDevices.size() ) {
5670 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5671 error( RtAudioError::INVALID_USE );
// Skip the output probe entirely when the device has no valid render id.
5676 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5678 LPDIRECTSOUND output;
5680 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5681 if ( FAILED( result ) ) {
5682 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5683 errorText_ = errorStream_.str();
5684 error( RtAudioError::WARNING );
5688 outCaps.dwSize = sizeof( outCaps );
5689 result = output->GetCaps( &outCaps );
5690 if ( FAILED( result ) ) {
5692 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5693 errorText_ = errorStream_.str();
5694 error( RtAudioError::WARNING );
5698 // Get output channel information.
5699 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5701 // Get sample rate information.
5702 info.sampleRates.clear();
5703 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5704 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5705 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5706 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48 kHz.
5708 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5709 info.preferredSampleRate = SAMPLE_RATES[k];
5713 // Get format information.
5714 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5715 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5719 if ( getDefaultOutputDevice() == device )
5720 info.isDefaultOutput = true;
// No capture id: finish with the output-only results gathered above.
5722 if ( dsDevices[ device ].validId[1] == false ) {
5723 info.name = dsDevices[ device ].name;
// Input probe: open the capture side and read its capabilities.
5730 LPDIRECTSOUNDCAPTURE input;
5731 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5732 if ( FAILED( result ) ) {
5733 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5734 errorText_ = errorStream_.str();
5735 error( RtAudioError::WARNING );
5740 inCaps.dwSize = sizeof( inCaps );
5741 result = input->GetCaps( &inCaps );
5742 if ( FAILED( result ) ) {
5744 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5745 errorText_ = errorStream_.str();
5746 error( RtAudioError::WARNING );
5750 // Get input channel information.
5751 info.inputChannels = inCaps.dwChannels;
5753 // Get sample rate and format information.
// Capture capabilities are reported as WAVE_FORMAT_* bit flags keyed by
// rate (1=11.025k, 2=22.05k, 4=44.1k, 96=96k), channel count (M/S) and
// sample width (08/16); translate them into formats and rates.
5754 std::vector<unsigned int> rates;
5755 if ( inCaps.dwChannels >= 2 ) {
5756 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5757 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5758 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5759 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5760 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5761 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5762 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5763 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5765 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5766 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5767 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5768 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5769 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5771 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5772 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5773 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5774 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5775 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5778 else if ( inCaps.dwChannels == 1 ) {
5779 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5780 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5781 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5782 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5783 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5784 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5785 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5786 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5788 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5789 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5790 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5791 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5792 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5794 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5795 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5796 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5797 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5798 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5801 else info.inputChannels = 0; // technically, this would be an error
5805 if ( info.inputChannels == 0 ) return info;
5807 // Copy the supported rates to the info structure but avoid duplication.
5809 for ( unsigned int i=0; i<rates.size(); i++ ) {
5811 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5812 if ( rates[i] == info.sampleRates[j] ) {
5817 if ( found == false ) info.sampleRates.push_back( rates[i] );
5819 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5821 // If device opens for both playback and capture, we determine the channels.
5822 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5823 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5825 if ( device == 0 ) info.isDefaultInput = true;
5827 // Copy name and return.
5828 info.name = dsDevices[ device ].name;
// Opens one direction (OUTPUT or INPUT) of a DirectSound stream on the given
// device: validates the request, creates the DirectSound playback/capture
// object and its secondary buffer, fills in the shared stream_ structure,
// allocates user/device conversion buffers, and spins up the callback thread.
// Returns true (SUCCESS) on success; on failure it releases everything it
// acquired and sets errorText_.
//
// NOTE(review): in this copy of the file every code line carries a stray
// leading line number, and the jumps in those numbers show that structural
// lines (closing braces, "return FAILURE;" statements after the error
// messages, and some local declarations such as the HRESULT result and the
// DSCAPS/DSBCAPS variables) have been dropped. The code will not compile
// as-is; the surviving lines are documented below, but the dropped lines
// must be restored from the canonical RtAudio sources before use.
5833 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5834 unsigned int firstChannel, unsigned int sampleRate,
5835 RtAudioFormat format, unsigned int *bufferSize,
5836 RtAudio::StreamOptions *options )
// DirectSound is limited to at most two channels per device, counting the
// channel offset requested by the caller.
5838 if ( channels + firstChannel > 2 ) {
5839 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
// Sanity checks on the probed device list; both cases "should not happen"
// because the public API validates before calling here.
5843 size_t nDevices = dsDevices.size();
5844 if ( nDevices == 0 ) {
5845 // This should not happen because a check is made before this function is called.
5846 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5850 if ( device >= nDevices ) {
5851 // This should not happen because a check is made before this function is called.
5852 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] marks output capability, validId[1] input capability.
5856 if ( mode == OUTPUT ) {
5857 if ( dsDevices[ device ].validId[0] == false ) {
5858 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5859 errorText_ = errorStream_.str();
5863 else { // mode == INPUT
5864 if ( dsDevices[ device ].validId[1] == false ) {
5865 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5866 errorText_ = errorStream_.str();
5871 // According to a note in PortAudio, using GetDesktopWindow()
5872 // instead of GetForegroundWindow() is supposed to avoid problems
5873 // that occur when the application's window is not the foreground
5874 // window. Also, if the application window closes before the
5875 // DirectSound buffer, DirectSound can crash. In the past, I had
5876 // problems when using GetDesktopWindow() but it seems fine now
5877 // (January 2010). I'll leave it commented here.
5878 // HWND hWnd = GetForegroundWindow();
5879 HWND hWnd = GetDesktopWindow();
5881 // Check the numberOfBuffers parameter and limit the lowest value to
5882 // two. This is a judgement call and a value of two is probably too
5883 // low for capture, but it should work for playback.
5885 if ( options ) nBuffers = options->numberOfBuffers;
5886 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5887 if ( nBuffers < 2 ) nBuffers = 3;
5889 // Check the lower range of the user-specified buffer size and set
5890 // (arbitrarily) to a lower bound of 32.
5891 if ( *bufferSize < 32 ) *bufferSize = 32;
5893 // Create the wave format structure. The data format setting will
5894 // be determined later.
5895 WAVEFORMATEX waveFormat;
5896 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5897 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5898 waveFormat.nChannels = channels + firstChannel;
5899 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5901 // Determine the device buffer size. By default, we'll use the value
5902 // defined above (32K), but we will grow it to make allowances for
5903 // very large software buffer sizes.
5904 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5905 DWORD dsPointerLeadTime = 0;
// ohandle holds the DirectSound(Capture) object, bhandle the secondary
// (or capture) buffer; both are stashed into the DsHandle at the end.
5907 void *ohandle = 0, *bhandle = 0;
5909 if ( mode == OUTPUT ) {
5911 LPDIRECTSOUND output;
5912 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5913 if ( FAILED( result ) ) {
5914 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5915 errorText_ = errorStream_.str();
// NOTE(review): the DSCAPS outCaps declaration appears to be among the
// dropped lines; only its initialization survives here.
5920 outCaps.dwSize = sizeof( outCaps );
5921 result = output->GetCaps( &outCaps );
5922 if ( FAILED( result ) ) {
5924 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5925 errorText_ = errorStream_.str();
5929 // Check channel information.
5930 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5931 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5932 errorText_ = errorStream_.str();
5936 // Check format information. Use 16-bit format unless not
5937 // supported or user requests 8-bit.
5938 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5939 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5940 waveFormat.wBitsPerSample = 16;
5941 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
// (else branch — 8-bit fallback)
5944 waveFormat.wBitsPerSample = 8;
5945 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5947 stream_.userFormat = format;
5949 // Update wave format structure and buffer information.
5950 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5951 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// dsPointerLeadTime is in bytes: how far ahead of the play cursor we write.
5952 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5954 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5955 while ( dsPointerLeadTime * 2U > dsBufferSize )
// NOTE(review): the loop body (doubling dsBufferSize) is among the dropped lines.
5958 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5959 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5960 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5961 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5962 if ( FAILED( result ) ) {
5964 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5965 errorText_ = errorStream_.str();
5969 // Even though we will write to the secondary buffer, we need to
5970 // access the primary buffer to set the correct output format
5971 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5972 // buffer description.
5973 DSBUFFERDESC bufferDescription;
5974 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5975 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5976 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5978 // Obtain the primary buffer
5979 LPDIRECTSOUNDBUFFER buffer;
5980 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5981 if ( FAILED( result ) ) {
5983 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5984 errorText_ = errorStream_.str();
5988 // Set the primary DS buffer sound format.
5989 result = buffer->SetFormat( &waveFormat );
5990 if ( FAILED( result ) ) {
5992 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5993 errorText_ = errorStream_.str();
5997 // Setup the secondary DS buffer description.
5998 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5999 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6000 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6001 DSBCAPS_GLOBALFOCUS |
6002 DSBCAPS_GETCURRENTPOSITION2 |
6003 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6004 bufferDescription.dwBufferBytes = dsBufferSize;
6005 bufferDescription.lpwfxFormat = &waveFormat;
6007 // Try to create the secondary DS buffer. If that doesn't work,
6008 // try to use software mixing. Otherwise, there's a problem.
6009 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6010 if ( FAILED( result ) ) {
6011 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6012 DSBCAPS_GLOBALFOCUS |
6013 DSBCAPS_GETCURRENTPOSITION2 |
6014 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6015 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6016 if ( FAILED( result ) ) {
6018 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6019 errorText_ = errorStream_.str();
6024 // Get the buffer size ... might be different from what we specified.
// NOTE(review): the DSBCAPS dsbcaps declaration appears to be among the
// dropped lines.
6026 dsbcaps.dwSize = sizeof( DSBCAPS );
6027 result = buffer->GetCaps( &dsbcaps );
6028 if ( FAILED( result ) ) {
6031 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6032 errorText_ = errorStream_.str();
6036 dsBufferSize = dsbcaps.dwBufferBytes;
6038 // Lock the DS buffer
// Lock the whole secondary buffer so it can be zeroed before playback.
6041 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6042 if ( FAILED( result ) ) {
6045 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6046 errorText_ = errorStream_.str();
6050 // Zero the DS buffer
6051 ZeroMemory( audioPtr, dataLen );
6053 // Unlock the DS buffer
6054 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6055 if ( FAILED( result ) ) {
6058 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6059 errorText_ = errorStream_.str();
6063 ohandle = (void *) output;
6064 bhandle = (void *) buffer;
// Capture (INPUT) side: mirrors the OUTPUT path using the capture API.
6067 if ( mode == INPUT ) {
6069 LPDIRECTSOUNDCAPTURE input;
6070 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6071 if ( FAILED( result ) ) {
6072 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6073 errorText_ = errorStream_.str();
6078 inCaps.dwSize = sizeof( inCaps );
6079 result = input->GetCaps( &inCaps );
6080 if ( FAILED( result ) ) {
6082 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6083 errorText_ = errorStream_.str();
6087 // Check channel information.
6088 if ( inCaps.dwChannels < channels + firstChannel ) {
6089 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6093 // Check format information. Use 16-bit format unless user
// ... requests 8-bit and the device advertises an 8-bit WAVE format.
6095 DWORD deviceFormats;
6096 if ( channels + firstChannel == 2 ) {
// Stereo: test against the stereo (S) 8-bit format flags.
6097 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6098 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6099 waveFormat.wBitsPerSample = 8;
6100 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6102 else { // assume 16-bit is supported
6103 waveFormat.wBitsPerSample = 16;
6104 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6107 else { // channel == 1
// Mono: test against the mono (M) 8-bit format flags.
6108 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6109 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6110 waveFormat.wBitsPerSample = 8;
6111 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6113 else { // assume 16-bit is supported
6114 waveFormat.wBitsPerSample = 16;
6115 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6118 stream_.userFormat = format;
6120 // Update wave format structure and buffer information.
6121 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6122 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6123 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6125 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6126 while ( dsPointerLeadTime * 2U > dsBufferSize )
// NOTE(review): the loop body (doubling dsBufferSize) is among the dropped lines.
6129 // Setup the secondary DS buffer description.
6130 DSCBUFFERDESC bufferDescription;
6131 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6132 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6133 bufferDescription.dwFlags = 0;
6134 bufferDescription.dwReserved = 0;
6135 bufferDescription.dwBufferBytes = dsBufferSize;
6136 bufferDescription.lpwfxFormat = &waveFormat;
6138 // Create the capture buffer.
6139 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6140 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6141 if ( FAILED( result ) ) {
6143 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6144 errorText_ = errorStream_.str();
6148 // Get the buffer size ... might be different from what we specified.
6150 dscbcaps.dwSize = sizeof( DSCBCAPS );
6151 result = buffer->GetCaps( &dscbcaps );
6152 if ( FAILED( result ) ) {
6155 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6156 errorText_ = errorStream_.str();
6160 dsBufferSize = dscbcaps.dwBufferBytes;
6162 // NOTE: We could have a problem here if this is a duplex stream
6163 // and the play and capture hardware buffer sizes are different
6164 // (I'm actually not sure if that is a problem or not).
6165 // Currently, we are not verifying that.
6167 // Lock the capture buffer
6170 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6171 if ( FAILED( result ) ) {
6174 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6175 errorText_ = errorStream_.str();
// Zero the capture buffer before use.
6180 ZeroMemory( audioPtr, dataLen );
6182 // Unlock the buffer
6183 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6184 if ( FAILED( result ) ) {
6187 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6188 errorText_ = errorStream_.str();
6192 ohandle = (void *) input;
6193 bhandle = (void *) buffer;
6196 // Set various stream parameters
6197 DsHandle *handle = 0;
6198 stream_.nDeviceChannels[mode] = channels + firstChannel;
6199 stream_.nUserChannels[mode] = channels;
6200 stream_.bufferSize = *bufferSize;
6201 stream_.channelOffset[mode] = firstChannel;
6202 stream_.deviceInterleaved[mode] = true;
6203 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6204 else stream_.userInterleaved = true;
6206 // Set flag for buffer conversion
// Conversion is needed when channel counts, sample formats, or
// interleaving differ between the user side and the device side.
6207 stream_.doConvertBuffer[mode] = false;
6208 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6209 stream_.doConvertBuffer[mode] = true;
6210 if (stream_.userFormat != stream_.deviceFormat[mode])
6211 stream_.doConvertBuffer[mode] = true;
6212 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6213 stream_.nUserChannels[mode] > 1 )
6214 stream_.doConvertBuffer[mode] = true;
6216 // Allocate necessary internal buffers
6217 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6218 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6219 if ( stream_.userBuffer[mode] == NULL ) {
6220 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6224 if ( stream_.doConvertBuffer[mode] ) {
6226 bool makeBuffer = true;
6227 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6228 if ( mode == INPUT ) {
// For duplex streams, reuse the output-mode device buffer when it is
// already at least as large as the input side needs.
6229 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6230 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6231 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6236 bufferBytes *= *bufferSize;
6237 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6238 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6239 if ( stream_.deviceBuffer == NULL ) {
6240 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6246 // Allocate our DsHandle structures for the stream.
6247 if ( stream_.apiHandle == 0 ) {
6249 handle = new DsHandle;
6251 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a DsHandle —
// looks like a copy/paste slip inherited from the ASIO backend; confirm
// against upstream before changing the string.
6252 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6256 // Create a manual-reset event.
6257 handle->condition = CreateEvent( NULL, // no security
6258 TRUE, // manual-reset
6259 FALSE, // non-signaled initially
6261 stream_.apiHandle = (void *) handle;
// (else branch — reuse the handle created when the first mode was opened)
6264 handle = (DsHandle *) stream_.apiHandle;
6265 handle->id[mode] = ohandle;
6266 handle->buffer[mode] = bhandle;
6267 handle->dsBufferSize[mode] = dsBufferSize;
6268 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6270 stream_.device[mode] = device;
6271 stream_.state = STREAM_STOPPED;
6272 if ( stream_.mode == OUTPUT && mode == INPUT )
6273 // We had already set up an output stream.
6274 stream_.mode = DUPLEX;
// (else branch)
6276 stream_.mode = mode;
6277 stream_.nBuffers = nBuffers;
6278 stream_.sampleRate = sampleRate;
6280 // Setup the buffer conversion information structure.
6281 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6283 // Setup the callback thread.
6284 if ( stream_.callbackInfo.isRunning == false ) {
6286 stream_.callbackInfo.isRunning = true;
6287 stream_.callbackInfo.object = (void *) this;
6288 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6289 &stream_.callbackInfo, 0, &threadId );
6290 if ( stream_.callbackInfo.thread == 0 ) {
6291 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6295 // Boost DS thread priority
6296 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// ---- Error cleanup path (reached via the error labels above): release
// any DirectSound objects/buffers, the condition event, and all
// allocated memory, then mark the stream closed.
6302 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6303 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6304 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6305 if ( buffer ) buffer->Release();
6308 if ( handle->buffer[1] ) {
6309 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6310 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6311 if ( buffer ) buffer->Release();
6314 CloseHandle( handle->condition );
6316 stream_.apiHandle = 0;
6319 for ( int i=0; i<2; i++ ) {
6320 if ( stream_.userBuffer[i] ) {
6321 free( stream_.userBuffer[i] );
6322 stream_.userBuffer[i] = 0;
6326 if ( stream_.deviceBuffer ) {
6327 free( stream_.deviceBuffer );
6328 stream_.deviceBuffer = 0;
6331 stream_.state = STREAM_CLOSED;
// Closes the open stream: stops the callback thread, releases the
// DirectSound playback/capture objects and buffers, destroys the condition
// event, frees all internal buffers, and resets the stream state.
// Emits a WARNING (not an error) if no stream is open.
//
// NOTE(review): this copy of the file has stray line numbers fused into
// each code line and several lines dropped (e.g. the buffer->Stop()/
// Release() and object->Release() calls between the visible lines); restore
// from the canonical RtAudio sources before use.
6335 void RtApiDs :: closeStream()
6337 if ( stream_.state == STREAM_CLOSED ) {
6338 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6339 error( RtAudioError::WARNING );
6343 // Stop the callback thread.
// Clearing isRunning signals the thread loop to exit; then join and close it.
6344 stream_.callbackInfo.isRunning = false;
6345 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6346 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6348 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6350 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6351 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6352 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6359 if ( handle->buffer[1] ) {
6360 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6361 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the manual-reset event created in probeDeviceOpen().
6368 CloseHandle( handle->condition );
6370 stream_.apiHandle = 0;
// Free the per-mode user buffers and the shared device buffer.
6373 for ( int i=0; i<2; i++ ) {
6374 if ( stream_.userBuffer[i] ) {
6375 free( stream_.userBuffer[i] );
6376 stream_.userBuffer[i] = 0;
6380 if ( stream_.deviceBuffer ) {
6381 free( stream_.deviceBuffer );
6382 stream_.deviceBuffer = 0;
6385 stream_.mode = UNINITIALIZED;
6386 stream_.state = STREAM_CLOSED;
// Starts the open stream: kicks the DirectSound playback buffer into looped
// play and/or the capture buffer into looped recording, resets the drain
// bookkeeping, and marks the stream RUNNING. Emits a WARNING if the stream
// is already running.
//
// NOTE(review): stray line numbers are fused into each code line and some
// lines are dropped in this copy (e.g. the HRESULT result initialization
// and the unlock/goto scaffolding around the error paths); restore from the
// canonical RtAudio sources before use.
6389 void RtApiDs :: startStream()
6392 if ( stream_.state == STREAM_RUNNING ) {
6393 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6394 error( RtAudioError::WARNING );
// Record the start time for stream-time bookkeeping where gettimeofday exists.
6398 #if defined( HAVE_GETTIMEOFDAY )
6399 gettimeofday( &stream_.lastTickTimestamp, NULL );
6402 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6404 // Increase scheduler frequency on lesser windows (a side-effect of
6405 // increasing timer accuracy). On greater windows (Win2K or later),
6406 // this is already in effect.
6407 timeBeginPeriod( 1 );
// buffersRolling/duplexPrerollBytes are reset here and consumed by the
// callback loop once both device cursors begin moving.
6409 buffersRolling = false;
6410 duplexPrerollBytes = 0;
6412 if ( stream_.mode == DUPLEX ) {
6413 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6414 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looped playback on the output buffer, if any.
6418 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6420 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6421 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6422 if ( FAILED( result ) ) {
6423 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6424 errorText_ = errorStream_.str();
// Start looped capture on the input buffer, if any.
6429 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6431 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6432 result = buffer->Start( DSCBSTART_LOOPING );
6433 if ( FAILED( result ) ) {
6434 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6435 errorText_ = errorStream_.str();
// Reset drain state and the stop-signal event before declaring RUNNING.
6440 handle->drainCounter = 0;
6441 handle->internalDrain = false;
6442 ResetEvent( handle->condition );
6443 stream_.state = STREAM_RUNNING;
6446 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stops the running stream: for output, waits for any in-flight buffer to
// drain (signaled via handle->condition), then stops each DirectSound
// buffer and zeroes it so a restart does not replay stale audio, resets the
// buffer pointers, restores the scheduler period, and reports any failure.
// Emits a WARNING if the stream is already stopped.
//
// NOTE(review): stray line numbers are fused into each code line and some
// lines are dropped in this copy (e.g. the HRESULT result initialization,
// the LPVOID audioPtr / DWORD dataLen declarations, and several goto/label
// lines around the error paths); restore from the canonical RtAudio sources
// before use.
6449 void RtApiDs :: stopStream()
6452 if ( stream_.state == STREAM_STOPPED ) {
6453 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6454 error( RtAudioError::WARNING );
6461 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6462 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Request a drain (drainCounter = 2) and block until the callback thread
// signals the condition event, so the last user buffer actually plays out.
6463 if ( handle->drainCounter == 0 ) {
6464 handle->drainCounter = 2;
6465 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6468 stream_.state = STREAM_STOPPED;
6470 MUTEX_LOCK( &stream_.mutex );
6472 // Stop the buffer and clear memory
6473 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6474 result = buffer->Stop();
6475 if ( FAILED( result ) ) {
6476 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6477 errorText_ = errorStream_.str();
6481 // Lock the buffer and clear it so that if we start to play again,
6482 // we won't have old data playing.
6483 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6484 if ( FAILED( result ) ) {
6485 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6486 errorText_ = errorStream_.str();
6490 // Zero the DS buffer
6491 ZeroMemory( audioPtr, dataLen );
6493 // Unlock the DS buffer
6494 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6495 if ( FAILED( result ) ) {
6496 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6497 errorText_ = errorStream_.str();
6501 // If we start playing again, we must begin at beginning of buffer.
6502 handle->bufferPointer[0] = 0;
6505 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6506 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6510 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above.
6512 if ( stream_.mode != DUPLEX )
6513 MUTEX_LOCK( &stream_.mutex );
6515 result = buffer->Stop();
6516 if ( FAILED( result ) ) {
6517 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6518 errorText_ = errorStream_.str();
6522 // Lock the buffer and clear it so that if we start to play again,
6523 // we won't have old data playing.
6524 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6525 if ( FAILED( result ) ) {
6526 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6527 errorText_ = errorStream_.str();
6531 // Zero the DS buffer
6532 ZeroMemory( audioPtr, dataLen );
6534 // Unlock the DS buffer
6535 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6536 if ( FAILED( result ) ) {
6537 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6538 errorText_ = errorStream_.str();
6542 // If we start recording again, we must begin at beginning of buffer.
6543 handle->bufferPointer[1] = 0;
// Common exit: restore timer resolution, release the mutex, report failures.
6547 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6548 MUTEX_UNLOCK( &stream_.mutex );
6550 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Aborts the running stream immediately: setting drainCounter to 2 (while
// skipping the "wait for drain" handshake that stopStream() performs when
// it sees drainCounter == 0) makes the pending output be discarded rather
// than played out. Emits a WARNING if the stream is already stopped.
//
// NOTE(review): the tail of this function (presumably the call into the
// common stop path) is among the lines dropped from this copy of the file;
// restore from the canonical RtAudio sources before use.
6553 void RtApiDs :: abortStream()
6556 if ( stream_.state == STREAM_STOPPED ) {
6557 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6558 error( RtAudioError::WARNING );
6562 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6563 handle->drainCounter = 2;
6568 void RtApiDs :: callbackEvent()
6570 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6571 Sleep( 50 ); // sleep 50 milliseconds
6575 if ( stream_.state == STREAM_CLOSED ) {
6576 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6577 error( RtAudioError::WARNING );
6581 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6582 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6584 // Check if we were draining the stream and signal is finished.
6585 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6587 stream_.state = STREAM_STOPPING;
6588 if ( handle->internalDrain == false )
6589 SetEvent( handle->condition );
6595 // Invoke user callback to get fresh output data UNLESS we are
6597 if ( handle->drainCounter == 0 ) {
6598 RtAudioCallback callback = (RtAudioCallback) info->callback;
6599 double streamTime = getStreamTime();
6600 RtAudioStreamStatus status = 0;
6601 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6602 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6603 handle->xrun[0] = false;
6605 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6606 status |= RTAUDIO_INPUT_OVERFLOW;
6607 handle->xrun[1] = false;
6609 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6610 stream_.bufferSize, streamTime, status, info->userData );
6611 if ( cbReturnValue == 2 ) {
6612 stream_.state = STREAM_STOPPING;
6613 handle->drainCounter = 2;
6617 else if ( cbReturnValue == 1 ) {
6618 handle->drainCounter = 1;
6619 handle->internalDrain = true;
6624 DWORD currentWritePointer, safeWritePointer;
6625 DWORD currentReadPointer, safeReadPointer;
6626 UINT nextWritePointer;
6628 LPVOID buffer1 = NULL;
6629 LPVOID buffer2 = NULL;
6630 DWORD bufferSize1 = 0;
6631 DWORD bufferSize2 = 0;
6636 MUTEX_LOCK( &stream_.mutex );
6637 if ( stream_.state == STREAM_STOPPED ) {
6638 MUTEX_UNLOCK( &stream_.mutex );
6642 if ( buffersRolling == false ) {
6643 if ( stream_.mode == DUPLEX ) {
6644 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6646 // It takes a while for the devices to get rolling. As a result,
6647 // there's no guarantee that the capture and write device pointers
6648 // will move in lockstep. Wait here for both devices to start
6649 // rolling, and then set our buffer pointers accordingly.
6650 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6651 // bytes later than the write buffer.
6653 // Stub: a serious risk of having a pre-emptive scheduling round
6654 // take place between the two GetCurrentPosition calls... but I'm
6655 // really not sure how to solve the problem. Temporarily boost to
6656 // Realtime priority, maybe; but I'm not sure what priority the
6657 // DirectSound service threads run at. We *should* be roughly
6658 // within a ms or so of correct.
6660 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6661 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6663 DWORD startSafeWritePointer, startSafeReadPointer;
6665 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6666 if ( FAILED( result ) ) {
6667 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6668 errorText_ = errorStream_.str();
6669 MUTEX_UNLOCK( &stream_.mutex );
6670 error( RtAudioError::SYSTEM_ERROR );
6673 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6674 if ( FAILED( result ) ) {
6675 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6676 errorText_ = errorStream_.str();
6677 MUTEX_UNLOCK( &stream_.mutex );
6678 error( RtAudioError::SYSTEM_ERROR );
6682 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6683 if ( FAILED( result ) ) {
6684 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6685 errorText_ = errorStream_.str();
6686 MUTEX_UNLOCK( &stream_.mutex );
6687 error( RtAudioError::SYSTEM_ERROR );
6690 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6691 if ( FAILED( result ) ) {
6692 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6693 errorText_ = errorStream_.str();
6694 MUTEX_UNLOCK( &stream_.mutex );
6695 error( RtAudioError::SYSTEM_ERROR );
6698 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6702 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6704 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6705 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6706 handle->bufferPointer[1] = safeReadPointer;
6708 else if ( stream_.mode == OUTPUT ) {
6710 // Set the proper nextWritePosition after initial startup.
6711 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6712 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6713 if ( FAILED( result ) ) {
6714 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6715 errorText_ = errorStream_.str();
6716 MUTEX_UNLOCK( &stream_.mutex );
6717 error( RtAudioError::SYSTEM_ERROR );
6720 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6721 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6724 buffersRolling = true;
6727 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6729 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6731 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6732 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6733 bufferBytes *= formatBytes( stream_.userFormat );
6734 memset( stream_.userBuffer[0], 0, bufferBytes );
6737 // Setup parameters and do buffer conversion if necessary.
6738 if ( stream_.doConvertBuffer[0] ) {
6739 buffer = stream_.deviceBuffer;
6740 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6741 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6742 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6745 buffer = stream_.userBuffer[0];
6746 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6747 bufferBytes *= formatBytes( stream_.userFormat );
6750 // No byte swapping necessary in DirectSound implementation.
6752 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6753 // unsigned. So, we need to convert our signed 8-bit data here to
6755 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6756 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6758 DWORD dsBufferSize = handle->dsBufferSize[0];
6759 nextWritePointer = handle->bufferPointer[0];
6761 DWORD endWrite, leadPointer;
6763 // Find out where the read and "safe write" pointers are.
6764 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6765 if ( FAILED( result ) ) {
6766 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6767 errorText_ = errorStream_.str();
6768 MUTEX_UNLOCK( &stream_.mutex );
6769 error( RtAudioError::SYSTEM_ERROR );
6773 // We will copy our output buffer into the region between
6774 // safeWritePointer and leadPointer. If leadPointer is not
6775 // beyond the next endWrite position, wait until it is.
6776 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6777 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6778 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6779 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6780 endWrite = nextWritePointer + bufferBytes;
6782 // Check whether the entire write region is behind the play pointer.
6783 if ( leadPointer >= endWrite ) break;
6785 // If we are here, then we must wait until the leadPointer advances
6786 // beyond the end of our next write region. We use the
6787 // Sleep() function to suspend operation until that happens.
6788 double millis = ( endWrite - leadPointer ) * 1000.0;
6789 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6790 if ( millis < 1.0 ) millis = 1.0;
6791 Sleep( (DWORD) millis );
6794 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6795 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6796 // We've strayed into the forbidden zone ... resync the read pointer.
6797 handle->xrun[0] = true;
6798 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6799 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6800 handle->bufferPointer[0] = nextWritePointer;
6801 endWrite = nextWritePointer + bufferBytes;
6804 // Lock free space in the buffer
6805 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6806 &bufferSize1, &buffer2, &bufferSize2, 0 );
6807 if ( FAILED( result ) ) {
6808 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6809 errorText_ = errorStream_.str();
6810 MUTEX_UNLOCK( &stream_.mutex );
6811 error( RtAudioError::SYSTEM_ERROR );
6815 // Copy our buffer into the DS buffer
6816 CopyMemory( buffer1, buffer, bufferSize1 );
6817 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6819 // Update our buffer offset and unlock sound buffer
6820 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6821 if ( FAILED( result ) ) {
6822 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6823 errorText_ = errorStream_.str();
6824 MUTEX_UNLOCK( &stream_.mutex );
6825 error( RtAudioError::SYSTEM_ERROR );
6828 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6829 handle->bufferPointer[0] = nextWritePointer;
6832 // Don't bother draining input
6833 if ( handle->drainCounter ) {
6834 handle->drainCounter++;
6838 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6840 // Setup parameters.
6841 if ( stream_.doConvertBuffer[1] ) {
6842 buffer = stream_.deviceBuffer;
6843 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6844 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6847 buffer = stream_.userBuffer[1];
6848 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6849 bufferBytes *= formatBytes( stream_.userFormat );
6852 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6853 long nextReadPointer = handle->bufferPointer[1];
6854 DWORD dsBufferSize = handle->dsBufferSize[1];
6856 // Find out where the write and "safe read" pointers are.
6857 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6858 if ( FAILED( result ) ) {
6859 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6860 errorText_ = errorStream_.str();
6861 MUTEX_UNLOCK( &stream_.mutex );
6862 error( RtAudioError::SYSTEM_ERROR );
6866 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6867 DWORD endRead = nextReadPointer + bufferBytes;
6869 // Handling depends on whether we are INPUT or DUPLEX.
6870 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6871 // then a wait here will drag the write pointers into the forbidden zone.
6873 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6874 // it's in a safe position. This causes dropouts, but it seems to be the only
6875 // practical way to sync up the read and write pointers reliably, given the
6876 // the very complex relationship between phase and increment of the read and write
6879 // In order to minimize audible dropouts in DUPLEX mode, we will
6880 // provide a pre-roll period of 0.5 seconds in which we return
6881 // zeros from the read buffer while the pointers sync up.
6883 if ( stream_.mode == DUPLEX ) {
6884 if ( safeReadPointer < endRead ) {
6885 if ( duplexPrerollBytes <= 0 ) {
6886 // Pre-roll time over. Be more agressive.
6887 int adjustment = endRead-safeReadPointer;
6889 handle->xrun[1] = true;
6891 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6892 // and perform fine adjustments later.
6893 // - small adjustments: back off by twice as much.
6894 if ( adjustment >= 2*bufferBytes )
6895 nextReadPointer = safeReadPointer-2*bufferBytes;
6897 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6899 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6903 // In pre=roll time. Just do it.
6904 nextReadPointer = safeReadPointer - bufferBytes;
6905 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6907 endRead = nextReadPointer + bufferBytes;
6910 else { // mode == INPUT
6911 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6912 // See comments for playback.
6913 double millis = (endRead - safeReadPointer) * 1000.0;
6914 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6915 if ( millis < 1.0 ) millis = 1.0;
6916 Sleep( (DWORD) millis );
6918 // Wake up and find out where we are now.
6919 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6920 if ( FAILED( result ) ) {
6921 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6922 errorText_ = errorStream_.str();
6923 MUTEX_UNLOCK( &stream_.mutex );
6924 error( RtAudioError::SYSTEM_ERROR );
6928 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6932 // Lock free space in the buffer
6933 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6934 &bufferSize1, &buffer2, &bufferSize2, 0 );
6935 if ( FAILED( result ) ) {
6936 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6937 errorText_ = errorStream_.str();
6938 MUTEX_UNLOCK( &stream_.mutex );
6939 error( RtAudioError::SYSTEM_ERROR );
6943 if ( duplexPrerollBytes <= 0 ) {
6944 // Copy our buffer into the DS buffer
6945 CopyMemory( buffer, buffer1, bufferSize1 );
6946 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6949 memset( buffer, 0, bufferSize1 );
6950 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6951 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6954 // Update our buffer offset and unlock sound buffer
6955 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6956 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6957 if ( FAILED( result ) ) {
6958 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6959 errorText_ = errorStream_.str();
6960 MUTEX_UNLOCK( &stream_.mutex );
6961 error( RtAudioError::SYSTEM_ERROR );
6964 handle->bufferPointer[1] = nextReadPointer;
6966 // No byte swapping necessary in DirectSound implementation.
6968 // If necessary, convert 8-bit data from unsigned to signed.
6969 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6970 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6972 // Do buffer conversion if necessary.
6973 if ( stream_.doConvertBuffer[1] )
6974 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6978 MUTEX_UNLOCK( &stream_.mutex );
6979 RtApi::tickStreamTime();
6982 // Definitions for utility functions and callbacks
6983 // specific to the DirectSound implementation.
// Thread entry point for the Windows DirectSound callback thread.
// Spins on the stream's isRunning flag, servicing audio i/o once per
// iteration via RtApiDs::callbackEvent() (which sleeps internally while
// waiting for buffer space), until the flag is cleared by the stream owner.
6985 static unsigned __stdcall callbackHandler( void *ptr )
// ptr is the stream's CallbackInfo, supplied when the thread was spawned.
6987 CallbackInfo *info = (CallbackInfo *) ptr;
6988 RtApiDs *object = (RtApiDs *) info->object;
// Polled every iteration; cleared externally (e.g. on stream close/abort)
// to make the thread exit its loop.
6989 bool* isRunning = &info->isRunning;
6991 while ( *isRunning == true ) {
6992 object->callbackEvent();
// DirectSoundEnumerate / DirectSoundCaptureEnumerate callback used while
// probing devices. Validates each enumerated device by actually opening it
// and querying its capabilities, then records its name and GUID in the
// shared DsDevice list passed through lpContext. Returning TRUE continues
// the enumeration to the next device.
6999 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7000 LPCTSTR description,
// lpContext carries a DsProbeData holding the probe direction (isInput)
// and a pointer to the accumulated device list.
7004 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7005 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7008 bool validDevice = false;
7009 if ( probeInfo.isInput == true ) {
// Capture direction: valid if the device opens and reports at least one
// channel and one supported format.
7011 LPDIRECTSOUNDCAPTURE object;
7013 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7014 if ( hr != DS_OK ) return TRUE;
7016 caps.dwSize = sizeof(caps);
7017 hr = object->GetCaps( &caps );
7018 if ( hr == DS_OK ) {
7019 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback direction: valid if the device opens and supports a primary
// mono or stereo buffer.
7026 LPDIRECTSOUND object;
7027 hr = DirectSoundCreate( lpguid, &object, NULL );
7028 if ( hr != DS_OK ) return TRUE;
7030 caps.dwSize = sizeof(caps);
7031 hr = object->GetCaps( &caps );
7032 if ( hr == DS_OK ) {
7033 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7039 // If good device, then save its name and guid.
7040 std::string name = convertCharPointerToStdString( description );
7041 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
7042 if ( lpguid == NULL )
7043 name = "Default Device";
7044 if ( validDevice ) {
// If a device with this name was already recorded (e.g. for the other
// direction), just fill in the id/validId slot for this direction:
// index 0 = output, index 1 = input.
7045 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7046 if ( dsDevices[i].name == name ) {
7047 dsDevices[i].found = true;
7048 if ( probeInfo.isInput ) {
7049 dsDevices[i].id[1] = lpguid;
7050 dsDevices[i].validId[1] = true;
7053 dsDevices[i].id[0] = lpguid;
7054 dsDevices[i].validId[0] = true;
// Otherwise append a fresh DsDevice entry for this direction.
7062 device.found = true;
7063 if ( probeInfo.isInput ) {
7064 device.id[1] = lpguid;
7065 device.validId[1] = true;
7068 device.id[0] = lpguid;
7069 device.validId[0] = true;
7071 dsDevices.push_back( device );
// Translate a DirectSound error code (DSERR_*) into a short, static,
// human-readable description for inclusion in RtApiDs error messages.
7077 static const char* getErrorString( int code )
7081 case DSERR_ALLOCATED:
7082 return "Already allocated";
7084 case DSERR_CONTROLUNAVAIL:
7085 return "Control unavailable";
7087 case DSERR_INVALIDPARAM:
7088 return "Invalid parameter";
7090 case DSERR_INVALIDCALL:
7091 return "Invalid call";
// NOTE(review): this return presumably belongs to DSERR_GENERIC — the
// case label is elided in this excerpt; confirm against the full source.
7094 return "Generic error";
7096 case DSERR_PRIOLEVELNEEDED:
7097 return "Priority level needed";
7099 case DSERR_OUTOFMEMORY:
7100 return "Out of memory";
7102 case DSERR_BADFORMAT:
7103 return "The sample rate or the channel format is not supported";
7105 case DSERR_UNSUPPORTED:
7106 return "Not supported";
7108 case DSERR_NODRIVER:
7111 case DSERR_ALREADYINITIALIZED:
7112 return "Already initialized";
7114 case DSERR_NOAGGREGATION:
7115 return "No aggregation";
7117 case DSERR_BUFFERLOST:
7118 return "Buffer lost";
7120 case DSERR_OTHERAPPHASPRIO:
7121 return "Another application already has priority";
7123 case DSERR_UNINITIALIZED:
7124 return "Uninitialized";
// Fallback for any code not matched above.
7127 return "DirectSound unknown error";
7130 //******************** End of __WINDOWS_DS__ *********************//
7134 #if defined(__LINUX_ALSA__)
7136 #include <alsa/asoundlib.h>
7139 // A structure to hold various information related to the ALSA API
// PCM handles for the two stream directions — presumably [0] = playback,
// [1] = capture, consistent with the stream_ array usage elsewhere in
// this file; confirm against the full struct definition.
7142 snd_pcm_t *handles[2];
// Condition variable used to signal the callback thread that the stream
// has become runnable.
7145 pthread_cond_t runnable_cv;
// Constructor initializer: stream starts neither synchronized nor
// runnable, with no xruns recorded for either direction.
7149 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Forward declaration of the ALSA callback-thread entry point.
7152 static void *alsaCallbackHandler( void * ptr );
// Default constructor: no ALSA-specific initialization is required here;
// device setup happens later in probeDeviceOpen().
7154 RtApiAlsa :: RtApiAlsa()
7156 // Nothing to do here.
// Destructor: ensure any open stream is properly shut down and its
// resources released before the API object is destroyed.
7159 RtApiAlsa :: ~RtApiAlsa()
7161 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of ALSA PCM devices, found by walking every sound
// card (control name "hw:N") and counting each card's PCM subdevices;
// an openable "default" control adds one further entry. Probe failures
// on individual cards only raise WARNINGs and the scan continues.
7164 unsigned int RtApiAlsa :: getDeviceCount( void )
7166 unsigned nDevices = 0;
7167 int result, subdevice, card;
7169 snd_ctl_t *handle = 0;
7171 // Count cards and devices
// Iterate sound cards until snd_card_next() reports none left (card < 0).
7173 snd_card_next( &card );
7174 while ( card >= 0 ) {
7175 sprintf( name, "hw:%d", card );
7176 result = snd_ctl_open( &handle, name, 0 );
7179 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7180 errorText_ = errorStream_.str();
7181 error( RtAudioError::WARNING );
// Enumerate this card's PCM subdevices; subdevice < 0 marks the end.
7186 result = snd_ctl_pcm_next_device( handle, &subdevice );
7188 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7189 errorText_ = errorStream_.str();
7190 error( RtAudioError::WARNING );
7193 if ( subdevice < 0 )
7199 snd_ctl_close( handle );
7200 snd_card_next( &card );
// Finally, count the "default" virtual device if its control opens.
7203 result = snd_ctl_open( &handle, "default", 0 );
7206 snd_ctl_close( handle );
// Probe ALSA device `device` (an index into the enumeration produced by
// getDeviceCount()) and return a populated RtAudio::DeviceInfo describing
// its input/output/duplex channel counts, supported sample rates and
// native data formats. Probe failures raise WARNINGs and return early
// with whatever information was gathered. If a stream is already open
// on the queried device, the cached results saved by saveDeviceInfo()
// are returned instead, since an open device cannot be re-probed.
// Fix applied: `&params` had been mis-encoded as `¶ms` (HTML entity
// damage), which broke compilation of the hw-params allocation call.
7212 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7214 RtAudio::DeviceInfo info;
7215 info.probed = false;
// Walk the cards/subdevices exactly as getDeviceCount() does, stopping
// when the requested index is reached so `name` holds its "hw:C,D" id.
7217 unsigned nDevices = 0;
7218 int result, subdevice, card;
7220 snd_ctl_t *chandle = 0;
7222 // Count cards and devices
7225 snd_card_next( &card );
7226 while ( card >= 0 ) {
7227 sprintf( name, "hw:%d", card );
7228 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7231 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7232 errorText_ = errorStream_.str();
7233 error( RtAudioError::WARNING );
7238 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7240 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7241 errorText_ = errorStream_.str();
7242 error( RtAudioError::WARNING );
7245 if ( subdevice < 0 ) break;
7246 if ( nDevices == device ) {
7247 sprintf( name, "hw:%d,%d", card, subdevice );
7254 snd_ctl_close( chandle );
7255 snd_card_next( &card );
// The "default" virtual device is the last enumerated entry.
7258 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7259 if ( result == 0 ) {
7260 if ( nDevices == device ) {
7261 strcpy( name, "default" );
7267 if ( nDevices == 0 ) {
7268 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7269 error( RtAudioError::INVALID_USE );
7273 if ( device >= nDevices ) {
7274 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7275 error( RtAudioError::INVALID_USE );
7281 // If a stream is already open, we cannot probe the stream devices.
7282 // Thus, use the saved results.
7283 if ( stream_.state != STREAM_CLOSED &&
7284 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7285 snd_ctl_close( chandle );
7286 if ( device >= devices_.size() ) {
7287 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7288 error( RtAudioError::WARNING );
7291 return devices_[ device ];
// Stack-allocated ALSA parameter structures (alloca-based, no free needed).
7294 int openMode = SND_PCM_ASYNC;
7295 snd_pcm_stream_t stream;
7296 snd_pcm_info_t *pcminfo;
7297 snd_pcm_info_alloca( &pcminfo );
7299 snd_pcm_hw_params_t *params;
7300 snd_pcm_hw_params_alloca( &params );
7302 // First try for playback unless default device (which has subdev -1)
7303 stream = SND_PCM_STREAM_PLAYBACK;
7304 snd_pcm_info_set_stream( pcminfo, stream );
7305 if ( subdevice != -1 ) {
7306 snd_pcm_info_set_device( pcminfo, subdevice );
7307 snd_pcm_info_set_subdevice( pcminfo, 0 );
7309 result = snd_ctl_pcm_info( chandle, pcminfo );
7311 // Device probably doesn't support playback.
// Open non-blocking so the probe cannot hang on a busy device.
7316 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7318 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7319 errorText_ = errorStream_.str();
7320 error( RtAudioError::WARNING );
7324 // The device is open ... fill the parameter structure.
7325 result = snd_pcm_hw_params_any( phandle, params );
7327 snd_pcm_close( phandle );
7328 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7329 errorText_ = errorStream_.str();
7330 error( RtAudioError::WARNING );
7334 // Get output channel information.
7336 result = snd_pcm_hw_params_get_channels_max( params, &value );
7338 snd_pcm_close( phandle );
7339 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7340 errorText_ = errorStream_.str();
7341 error( RtAudioError::WARNING );
7344 info.outputChannels = value;
7345 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7348 stream = SND_PCM_STREAM_CAPTURE;
7349 snd_pcm_info_set_stream( pcminfo, stream );
7351 // Now try for capture unless default device (with subdev = -1)
7352 if ( subdevice != -1 ) {
7353 result = snd_ctl_pcm_info( chandle, pcminfo );
7354 snd_ctl_close( chandle );
7356 // Device probably doesn't support capture.
7357 if ( info.outputChannels == 0 ) return info;
7358 goto probeParameters;
7362 snd_ctl_close( chandle );
7364 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7366 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7367 errorText_ = errorStream_.str();
7368 error( RtAudioError::WARNING );
// If playback channels were found, still probe rates/formats below.
7369 if ( info.outputChannels == 0 ) return info;
7370 goto probeParameters;
7373 // The device is open ... fill the parameter structure.
7374 result = snd_pcm_hw_params_any( phandle, params );
7376 snd_pcm_close( phandle );
7377 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7378 errorText_ = errorStream_.str();
7379 error( RtAudioError::WARNING );
7380 if ( info.outputChannels == 0 ) return info;
7381 goto probeParameters;
7384 result = snd_pcm_hw_params_get_channels_max( params, &value );
7386 snd_pcm_close( phandle );
7387 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7388 errorText_ = errorStream_.str();
7389 error( RtAudioError::WARNING );
7390 if ( info.outputChannels == 0 ) return info;
7391 goto probeParameters;
7393 info.inputChannels = value;
7394 snd_pcm_close( phandle );
7396 // If device opens for both playback and capture, we determine the channels.
7397 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7398 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7400 // ALSA doesn't provide default devices so we'll use the first available one.
7401 if ( device == 0 && info.outputChannels > 0 )
7402 info.isDefaultOutput = true;
7403 if ( device == 0 && info.inputChannels > 0 )
7404 info.isDefaultInput = true;
7407 // At this point, we just need to figure out the supported data
7408 // formats and sample rates. We'll proceed by opening the device in
7409 // the direction with the maximum number of channels, or playback if
7410 // they are equal. This might limit our sample rate options, but so
7413 if ( info.outputChannels >= info.inputChannels )
7414 stream = SND_PCM_STREAM_PLAYBACK;
7416 stream = SND_PCM_STREAM_CAPTURE;
7417 snd_pcm_info_set_stream( pcminfo, stream );
7419 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7421 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7422 errorText_ = errorStream_.str();
7423 error( RtAudioError::WARNING );
7427 // The device is open ... fill the parameter structure.
7428 result = snd_pcm_hw_params_any( phandle, params );
7430 snd_pcm_close( phandle );
7431 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7432 errorText_ = errorStream_.str();
7433 error( RtAudioError::WARNING );
7437 // Test our discrete set of sample rate values.
7438 info.sampleRates.clear();
7439 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7440 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7441 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate not exceeding 48 kHz
// (or the first supported rate if none qualify).
7443 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7444 info.preferredSampleRate = SAMPLE_RATES[i];
7447 if ( info.sampleRates.size() == 0 ) {
7448 snd_pcm_close( phandle );
7449 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7450 errorText_ = errorStream_.str();
7451 error( RtAudioError::WARNING );
7455 // Probe the supported data formats ... we don't care about endian-ness just yet
7456 snd_pcm_format_t format;
7457 info.nativeFormats = 0;
7458 format = SND_PCM_FORMAT_S8;
7459 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7460 info.nativeFormats |= RTAUDIO_SINT8;
7461 format = SND_PCM_FORMAT_S16;
7462 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7463 info.nativeFormats |= RTAUDIO_SINT16;
7464 format = SND_PCM_FORMAT_S24;
7465 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7466 info.nativeFormats |= RTAUDIO_SINT24;
7467 format = SND_PCM_FORMAT_S32;
7468 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7469 info.nativeFormats |= RTAUDIO_SINT32;
7470 format = SND_PCM_FORMAT_FLOAT;
7471 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7472 info.nativeFormats |= RTAUDIO_FLOAT32;
7473 format = SND_PCM_FORMAT_FLOAT64;
7474 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7475 info.nativeFormats |= RTAUDIO_FLOAT64;
7477 // Check that we have at least one supported format
7478 if ( info.nativeFormats == 0 ) {
7479 snd_pcm_close( phandle );
7480 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7481 errorText_ = errorStream_.str();
7482 error( RtAudioError::WARNING );
7486 // Get the device name
7488 result = snd_card_get_name( card, &cardname );
7489 if ( result >= 0 ) {
7490 sprintf( name, "hw:%s,%d", cardname, subdevice );
7495 // That's all ... close the device and return
7496 snd_pcm_close( phandle );
// Snapshot the current device list into devices_ so that getDeviceInfo()
// can answer queries about a device that is in use by an open stream
// (an open device cannot be re-probed). Called from probeDeviceOpen().
7501 void RtApiAlsa :: saveDeviceInfo( void )
7505 unsigned int nDevices = getDeviceCount();
7506 devices_.resize( nDevices );
// Probe every device up front; each entry mirrors getDeviceInfo(i).
7507 for ( unsigned int i=0; i<nDevices; i++ )
7508 devices_[i] = getDeviceInfo( i );
7511 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7512 unsigned int firstChannel, unsigned int sampleRate,
7513 RtAudioFormat format, unsigned int *bufferSize,
7514 RtAudio::StreamOptions *options )
7517 #if defined(__RTAUDIO_DEBUG__)
7519 snd_output_stdio_attach(&out, stderr, 0);
7522 // I'm not using the "plug" interface ... too much inconsistent behavior.
7524 unsigned nDevices = 0;
7525 int result, subdevice, card;
7529 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7530 snprintf(name, sizeof(name), "%s", "default");
7532 // Count cards and devices
7534 snd_card_next( &card );
7535 while ( card >= 0 ) {
7536 sprintf( name, "hw:%d", card );
7537 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7539 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7540 errorText_ = errorStream_.str();
7545 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7546 if ( result < 0 ) break;
7547 if ( subdevice < 0 ) break;
7548 if ( nDevices == device ) {
7549 sprintf( name, "hw:%d,%d", card, subdevice );
7550 snd_ctl_close( chandle );
7555 snd_ctl_close( chandle );
7556 snd_card_next( &card );
7559 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7560 if ( result == 0 ) {
7561 if ( nDevices == device ) {
7562 strcpy( name, "default" );
7563 snd_ctl_close( chandle );
7568 snd_ctl_close( chandle );
7570 if ( nDevices == 0 ) {
7571 // This should not happen because a check is made before this function is called.
7572 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7576 if ( device >= nDevices ) {
7577 // This should not happen because a check is made before this function is called.
7578 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7585 // The getDeviceInfo() function will not work for a device that is
7586 // already open. Thus, we'll probe the system before opening a
7587 // stream and save the results for use by getDeviceInfo().
7588 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7589 this->saveDeviceInfo();
7591 snd_pcm_stream_t stream;
7592 if ( mode == OUTPUT )
7593 stream = SND_PCM_STREAM_PLAYBACK;
7595 stream = SND_PCM_STREAM_CAPTURE;
7598 int openMode = SND_PCM_ASYNC;
7599 result = snd_pcm_open( &phandle, name, stream, openMode );
7601 if ( mode == OUTPUT )
7602 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7604 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7605 errorText_ = errorStream_.str();
7609 // Fill the parameter structure.
7610 snd_pcm_hw_params_t *hw_params;
7611 snd_pcm_hw_params_alloca( &hw_params );
7612 result = snd_pcm_hw_params_any( phandle, hw_params );
7614 snd_pcm_close( phandle );
7615 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7616 errorText_ = errorStream_.str();
7620 #if defined(__RTAUDIO_DEBUG__)
7621 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7622 snd_pcm_hw_params_dump( hw_params, out );
7625 // Set access ... check user preference.
7626 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7627 stream_.userInterleaved = false;
7628 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7630 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7631 stream_.deviceInterleaved[mode] = true;
7634 stream_.deviceInterleaved[mode] = false;
7637 stream_.userInterleaved = true;
7638 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7640 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7641 stream_.deviceInterleaved[mode] = false;
7644 stream_.deviceInterleaved[mode] = true;
7648 snd_pcm_close( phandle );
7649 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7650 errorText_ = errorStream_.str();
7654 // Determine how to set the device format.
7655 stream_.userFormat = format;
7656 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7658 if ( format == RTAUDIO_SINT8 )
7659 deviceFormat = SND_PCM_FORMAT_S8;
7660 else if ( format == RTAUDIO_SINT16 )
7661 deviceFormat = SND_PCM_FORMAT_S16;
7662 else if ( format == RTAUDIO_SINT24 )
7663 deviceFormat = SND_PCM_FORMAT_S24;
7664 else if ( format == RTAUDIO_SINT32 )
7665 deviceFormat = SND_PCM_FORMAT_S32;
7666 else if ( format == RTAUDIO_FLOAT32 )
7667 deviceFormat = SND_PCM_FORMAT_FLOAT;
7668 else if ( format == RTAUDIO_FLOAT64 )
7669 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7671 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7672 stream_.deviceFormat[mode] = format;
7676 // The user requested format is not natively supported by the device.
7677 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7678 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7679 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7683 deviceFormat = SND_PCM_FORMAT_FLOAT;
7684 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7685 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7689 deviceFormat = SND_PCM_FORMAT_S32;
7690 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7691 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7695 deviceFormat = SND_PCM_FORMAT_S24;
7696 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7697 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7701 deviceFormat = SND_PCM_FORMAT_S16;
7702 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7703 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7707 deviceFormat = SND_PCM_FORMAT_S8;
7708 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7709 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7713 // If we get here, no supported format was found.
7714 snd_pcm_close( phandle );
7715 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7716 errorText_ = errorStream_.str();
7720 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7722 snd_pcm_close( phandle );
7723 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7724 errorText_ = errorStream_.str();
7728 // Determine whether byte-swaping is necessary.
7729 stream_.doByteSwap[mode] = false;
7730 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7731 result = snd_pcm_format_cpu_endian( deviceFormat );
7733 stream_.doByteSwap[mode] = true;
7734 else if (result < 0) {
7735 snd_pcm_close( phandle );
7736 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7737 errorText_ = errorStream_.str();
7742 // Set the sample rate.
7743 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7745 snd_pcm_close( phandle );
7746 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7747 errorText_ = errorStream_.str();
7751 // Determine the number of channels for this device. We support a possible
7752 // minimum device channel number > than the value requested by the user.
7753 stream_.nUserChannels[mode] = channels;
7755 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7756 unsigned int deviceChannels = value;
7757 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7758 snd_pcm_close( phandle );
7759 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7760 errorText_ = errorStream_.str();
7764 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7766 snd_pcm_close( phandle );
7767 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7768 errorText_ = errorStream_.str();
7771 deviceChannels = value;
7772 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7773 stream_.nDeviceChannels[mode] = deviceChannels;
7775 // Set the device channels.
7776 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7778 snd_pcm_close( phandle );
7779 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7780 errorText_ = errorStream_.str();
7784 // Set the buffer (or period) size.
7786 snd_pcm_uframes_t periodSize = *bufferSize;
7787 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7789 snd_pcm_close( phandle );
7790 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7791 errorText_ = errorStream_.str();
7794 *bufferSize = periodSize;
7796 // Set the buffer number, which in ALSA is referred to as the "period".
7797 unsigned int periods = 0;
7798 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7799 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7800 if ( periods < 2 ) periods = 4; // a fairly safe default value
7801 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7803 snd_pcm_close( phandle );
7804 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7805 errorText_ = errorStream_.str();
7809 // If attempting to setup a duplex stream, the bufferSize parameter
7810 // MUST be the same in both directions!
7811 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7812 snd_pcm_close( phandle );
7813 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7814 errorText_ = errorStream_.str();
7818 stream_.bufferSize = *bufferSize;
7820 // Install the hardware configuration
7821 result = snd_pcm_hw_params( phandle, hw_params );
7823 snd_pcm_close( phandle );
7824 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7825 errorText_ = errorStream_.str();
7829 #if defined(__RTAUDIO_DEBUG__)
7830 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7831 snd_pcm_hw_params_dump( hw_params, out );
7834 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7835 snd_pcm_sw_params_t *sw_params = NULL;
7836 snd_pcm_sw_params_alloca( &sw_params );
7837 snd_pcm_sw_params_current( phandle, sw_params );
7838 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7839 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7840 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7842 // The following two settings were suggested by Theo Veenker
7843 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7844 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7846 // here are two options for a fix
7847 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7848 snd_pcm_uframes_t val;
7849 snd_pcm_sw_params_get_boundary( sw_params, &val );
7850 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7852 result = snd_pcm_sw_params( phandle, sw_params );
7854 snd_pcm_close( phandle );
7855 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7856 errorText_ = errorStream_.str();
7860 #if defined(__RTAUDIO_DEBUG__)
7861 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7862 snd_pcm_sw_params_dump( sw_params, out );
7865 // Set flags for buffer conversion
7866 stream_.doConvertBuffer[mode] = false;
7867 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7868 stream_.doConvertBuffer[mode] = true;
7869 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7870 stream_.doConvertBuffer[mode] = true;
7871 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7872 stream_.nUserChannels[mode] > 1 )
7873 stream_.doConvertBuffer[mode] = true;
7875 // Allocate the ApiHandle if necessary and then save.
7876 AlsaHandle *apiInfo = 0;
7877 if ( stream_.apiHandle == 0 ) {
7879 apiInfo = (AlsaHandle *) new AlsaHandle;
7881 catch ( std::bad_alloc& ) {
7882 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7886 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7887 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7891 stream_.apiHandle = (void *) apiInfo;
7892 apiInfo->handles[0] = 0;
7893 apiInfo->handles[1] = 0;
7896 apiInfo = (AlsaHandle *) stream_.apiHandle;
7898 apiInfo->handles[mode] = phandle;
7901 // Allocate necessary internal buffers.
7902 unsigned long bufferBytes;
7903 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7904 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7905 if ( stream_.userBuffer[mode] == NULL ) {
7906 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7910 if ( stream_.doConvertBuffer[mode] ) {
7912 bool makeBuffer = true;
7913 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7914 if ( mode == INPUT ) {
7915 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7916 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7917 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7922 bufferBytes *= *bufferSize;
7923 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7924 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7925 if ( stream_.deviceBuffer == NULL ) {
7926 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7932 stream_.sampleRate = sampleRate;
7933 stream_.nBuffers = periods;
7934 stream_.device[mode] = device;
7935 stream_.state = STREAM_STOPPED;
7937 // Setup the buffer conversion information structure.
7938 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7940 // Setup thread if necessary.
7941 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7942 // We had already set up an output stream.
7943 stream_.mode = DUPLEX;
7944 // Link the streams if possible.
7945 apiInfo->synchronized = false;
7946 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7947 apiInfo->synchronized = true;
7949 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7950 error( RtAudioError::WARNING );
7954 stream_.mode = mode;
7956 // Setup callback thread.
7957 stream_.callbackInfo.object = (void *) this;
7959 // Set the thread attributes for joinable and realtime scheduling
7960 // priority (optional). The higher priority will only take affect
7961 // if the program is run as root or suid. Note, under Linux
7962 // processes with CAP_SYS_NICE privilege, a user can change
7963 // scheduling policy and priority (thus need not be root). See
7964 // POSIX "capabilities".
7965 pthread_attr_t attr;
7966 pthread_attr_init( &attr );
7967 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7968 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7969 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7970 stream_.callbackInfo.doRealtime = true;
7971 struct sched_param param;
7972 int priority = options->priority;
7973 int min = sched_get_priority_min( SCHED_RR );
7974 int max = sched_get_priority_max( SCHED_RR );
7975 if ( priority < min ) priority = min;
7976 else if ( priority > max ) priority = max;
7977 param.sched_priority = priority;
7979 // Set the policy BEFORE the priority. Otherwise it fails.
7980 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7981 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7982 // This is definitely required. Otherwise it fails.
7983 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7984 pthread_attr_setschedparam(&attr, ¶m);
7987 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7989 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7992 stream_.callbackInfo.isRunning = true;
7993 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7994 pthread_attr_destroy( &attr );
7996 // Failed. Try instead with default attributes.
7997 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7999 stream_.callbackInfo.isRunning = false;
8000 errorText_ = "RtApiAlsa::error creating callback thread!";
8010 pthread_cond_destroy( &apiInfo->runnable_cv );
8011 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8012 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8014 stream_.apiHandle = 0;
8017 if ( phandle) snd_pcm_close( phandle );
8019 for ( int i=0; i<2; i++ ) {
8020 if ( stream_.userBuffer[i] ) {
8021 free( stream_.userBuffer[i] );
8022 stream_.userBuffer[i] = 0;
8026 if ( stream_.deviceBuffer ) {
8027 free( stream_.deviceBuffer );
8028 stream_.deviceBuffer = 0;
8031 stream_.state = STREAM_CLOSED;
8035 void RtApiAlsa :: closeStream()
8037 if ( stream_.state == STREAM_CLOSED ) {
8038 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8039 error( RtAudioError::WARNING );
8043 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8044 stream_.callbackInfo.isRunning = false;
8045 MUTEX_LOCK( &stream_.mutex );
8046 if ( stream_.state == STREAM_STOPPED ) {
8047 apiInfo->runnable = true;
8048 pthread_cond_signal( &apiInfo->runnable_cv );
8050 MUTEX_UNLOCK( &stream_.mutex );
8051 pthread_join( stream_.callbackInfo.thread, NULL );
8053 if ( stream_.state == STREAM_RUNNING ) {
8054 stream_.state = STREAM_STOPPED;
8055 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8056 snd_pcm_drop( apiInfo->handles[0] );
8057 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8058 snd_pcm_drop( apiInfo->handles[1] );
8062 pthread_cond_destroy( &apiInfo->runnable_cv );
8063 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8064 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8066 stream_.apiHandle = 0;
8069 for ( int i=0; i<2; i++ ) {
8070 if ( stream_.userBuffer[i] ) {
8071 free( stream_.userBuffer[i] );
8072 stream_.userBuffer[i] = 0;
8076 if ( stream_.deviceBuffer ) {
8077 free( stream_.deviceBuffer );
8078 stream_.deviceBuffer = 0;
8081 stream_.mode = UNINITIALIZED;
8082 stream_.state = STREAM_CLOSED;
8085 void RtApiAlsa :: startStream()
8087 // This method calls snd_pcm_prepare if the device isn't already in that state.
8090 if ( stream_.state == STREAM_RUNNING ) {
8091 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8092 error( RtAudioError::WARNING );
8096 MUTEX_LOCK( &stream_.mutex );
8098 #if defined( HAVE_GETTIMEOFDAY )
8099 gettimeofday( &stream_.lastTickTimestamp, NULL );
8103 snd_pcm_state_t state;
8104 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8105 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8106 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8107 state = snd_pcm_state( handle[0] );
8108 if ( state != SND_PCM_STATE_PREPARED ) {
8109 result = snd_pcm_prepare( handle[0] );
8111 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8112 errorText_ = errorStream_.str();
8118 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8119 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8120 state = snd_pcm_state( handle[1] );
8121 if ( state != SND_PCM_STATE_PREPARED ) {
8122 result = snd_pcm_prepare( handle[1] );
8124 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8125 errorText_ = errorStream_.str();
8131 stream_.state = STREAM_RUNNING;
8134 apiInfo->runnable = true;
8135 pthread_cond_signal( &apiInfo->runnable_cv );
8136 MUTEX_UNLOCK( &stream_.mutex );
8138 if ( result >= 0 ) return;
8139 error( RtAudioError::SYSTEM_ERROR );
8142 void RtApiAlsa :: stopStream()
8145 if ( stream_.state == STREAM_STOPPED ) {
8146 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8147 error( RtAudioError::WARNING );
8151 stream_.state = STREAM_STOPPED;
8152 MUTEX_LOCK( &stream_.mutex );
8155 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8156 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8157 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8158 if ( apiInfo->synchronized )
8159 result = snd_pcm_drop( handle[0] );
8161 result = snd_pcm_drain( handle[0] );
8163 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8164 errorText_ = errorStream_.str();
8169 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8170 result = snd_pcm_drop( handle[1] );
8172 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8173 errorText_ = errorStream_.str();
8179 apiInfo->runnable = false; // fixes high CPU usage when stopped
8180 MUTEX_UNLOCK( &stream_.mutex );
8182 if ( result >= 0 ) return;
8183 error( RtAudioError::SYSTEM_ERROR );
8186 void RtApiAlsa :: abortStream()
8189 if ( stream_.state == STREAM_STOPPED ) {
8190 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8191 error( RtAudioError::WARNING );
8195 stream_.state = STREAM_STOPPED;
8196 MUTEX_LOCK( &stream_.mutex );
8199 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8200 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8201 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8202 result = snd_pcm_drop( handle[0] );
8204 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8205 errorText_ = errorStream_.str();
8210 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8211 result = snd_pcm_drop( handle[1] );
8213 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8214 errorText_ = errorStream_.str();
8220 apiInfo->runnable = false; // fixes high CPU usage when stopped
8221 MUTEX_UNLOCK( &stream_.mutex );
8223 if ( result >= 0 ) return;
8224 error( RtAudioError::SYSTEM_ERROR );
8227 void RtApiAlsa :: callbackEvent()
8229 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8230 if ( stream_.state == STREAM_STOPPED ) {
8231 MUTEX_LOCK( &stream_.mutex );
8232 while ( !apiInfo->runnable )
8233 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8235 if ( stream_.state != STREAM_RUNNING ) {
8236 MUTEX_UNLOCK( &stream_.mutex );
8239 MUTEX_UNLOCK( &stream_.mutex );
8242 if ( stream_.state == STREAM_CLOSED ) {
8243 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8244 error( RtAudioError::WARNING );
8248 int doStopStream = 0;
8249 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8250 double streamTime = getStreamTime();
8251 RtAudioStreamStatus status = 0;
8252 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8253 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8254 apiInfo->xrun[0] = false;
8256 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8257 status |= RTAUDIO_INPUT_OVERFLOW;
8258 apiInfo->xrun[1] = false;
8260 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8261 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8263 if ( doStopStream == 2 ) {
8268 MUTEX_LOCK( &stream_.mutex );
8270 // The state might change while waiting on a mutex.
8271 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8277 snd_pcm_sframes_t frames;
8278 RtAudioFormat format;
8279 handle = (snd_pcm_t **) apiInfo->handles;
8281 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8283 // Setup parameters.
8284 if ( stream_.doConvertBuffer[1] ) {
8285 buffer = stream_.deviceBuffer;
8286 channels = stream_.nDeviceChannels[1];
8287 format = stream_.deviceFormat[1];
8290 buffer = stream_.userBuffer[1];
8291 channels = stream_.nUserChannels[1];
8292 format = stream_.userFormat;
8295 // Read samples from device in interleaved/non-interleaved format.
8296 if ( stream_.deviceInterleaved[1] )
8297 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8299 void *bufs[channels];
8300 size_t offset = stream_.bufferSize * formatBytes( format );
8301 for ( int i=0; i<channels; i++ )
8302 bufs[i] = (void *) (buffer + (i * offset));
8303 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8306 if ( result < (int) stream_.bufferSize ) {
8307 // Either an error or overrun occured.
8308 if ( result == -EPIPE ) {
8309 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8310 if ( state == SND_PCM_STATE_XRUN ) {
8311 apiInfo->xrun[1] = true;
8312 result = snd_pcm_prepare( handle[1] );
8314 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8315 errorText_ = errorStream_.str();
8319 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8320 errorText_ = errorStream_.str();
8324 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8325 errorText_ = errorStream_.str();
8327 error( RtAudioError::WARNING );
8331 // Do byte swapping if necessary.
8332 if ( stream_.doByteSwap[1] )
8333 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8335 // Do buffer conversion if necessary.
8336 if ( stream_.doConvertBuffer[1] )
8337 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8339 // Check stream latency
8340 result = snd_pcm_delay( handle[1], &frames );
8341 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
8346 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8348 // Setup parameters and do buffer conversion if necessary.
8349 if ( stream_.doConvertBuffer[0] ) {
8350 buffer = stream_.deviceBuffer;
8351 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8352 channels = stream_.nDeviceChannels[0];
8353 format = stream_.deviceFormat[0];
8356 buffer = stream_.userBuffer[0];
8357 channels = stream_.nUserChannels[0];
8358 format = stream_.userFormat;
8361 // Do byte swapping if necessary.
8362 if ( stream_.doByteSwap[0] )
8363 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8365 // Write samples to device in interleaved/non-interleaved format.
8366 if ( stream_.deviceInterleaved[0] )
8367 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8369 void *bufs[channels];
8370 size_t offset = stream_.bufferSize * formatBytes( format );
8371 for ( int i=0; i<channels; i++ )
8372 bufs[i] = (void *) (buffer + (i * offset));
8373 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8376 if ( result < (int) stream_.bufferSize ) {
8377 // Either an error or underrun occured.
8378 if ( result == -EPIPE ) {
8379 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8380 if ( state == SND_PCM_STATE_XRUN ) {
8381 apiInfo->xrun[0] = true;
8382 result = snd_pcm_prepare( handle[0] );
8384 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8385 errorText_ = errorStream_.str();
8388 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8391 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8392 errorText_ = errorStream_.str();
8396 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8397 errorText_ = errorStream_.str();
8399 error( RtAudioError::WARNING );
8403 // Check stream latency
8404 result = snd_pcm_delay( handle[0], &frames );
8405 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8409 MUTEX_UNLOCK( &stream_.mutex );
8411 RtApi::tickStreamTime();
8412 if ( doStopStream == 1 ) this->stopStream();
8415 static void *alsaCallbackHandler( void *ptr )
8417 CallbackInfo *info = (CallbackInfo *) ptr;
8418 RtApiAlsa *object = (RtApiAlsa *) info->object;
8419 bool *isRunning = &info->isRunning;
8421 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8422 if ( info->doRealtime ) {
8423 std::cerr << "RtAudio alsa: " <<
8424 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8425 "running realtime scheduling" << std::endl;
8429 while ( *isRunning == true ) {
8430 pthread_testcancel();
8431 object->callbackEvent();
8434 pthread_exit( NULL );
8437 //******************** End of __LINUX_ALSA__ *********************//
8440 #if defined(__LINUX_PULSE__)
8442 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8443 // and Tristan Matthews.
8445 #include <pulse/error.h>
8446 #include <pulse/simple.h>
// Zero-terminated list of sample rates advertised by the PulseAudio backend
// (see getDeviceInfo / probeDeviceOpen, which scan until the 0 sentinel).
static const unsigned int SUPPORTED_SAMPLERATES[] = {
  8000, 16000, 22050, 32000, 44100, 48000, 96000,
  0  // sentinel — terminates iteration
};
8452 struct rtaudio_pa_format_mapping_t {
8453 RtAudioFormat rtaudio_format;
8454 pa_sample_format_t pa_format;
// Table mapping RtAudio sample formats to PulseAudio sample formats,
// terminated by a {0, PA_SAMPLE_INVALID} sentinel. probeDeviceOpen()
// scans it in order; formats not listed here fall back to internal
// conversion via FLOAT32.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
  {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
  {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
  {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
  {0, PA_SAMPLE_INVALID}};
8463 struct PulseAudioHandle {
8467 pthread_cond_t runnable_cv;
8469 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8472 RtApiPulse::~RtApiPulse()
8474 if ( stream_.state != STREAM_CLOSED )
8478 unsigned int RtApiPulse::getDeviceCount( void )
8483 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8485 RtAudio::DeviceInfo info;
8487 info.name = "PulseAudio";
8488 info.outputChannels = 2;
8489 info.inputChannels = 2;
8490 info.duplexChannels = 2;
8491 info.isDefaultOutput = true;
8492 info.isDefaultInput = true;
8494 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8495 info.sampleRates.push_back( *sr );
8497 info.preferredSampleRate = 48000;
8498 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8503 static void *pulseaudio_callback( void * user )
8505 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8506 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8507 volatile bool *isRunning = &cbi->isRunning;
8509 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8510 if (cbi->doRealtime) {
8511 std::cerr << "RtAudio pulse: " <<
8512 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8513 "running realtime scheduling" << std::endl;
8517 while ( *isRunning ) {
8518 pthread_testcancel();
8519 context->callbackEvent();
8522 pthread_exit( NULL );
8525 void RtApiPulse::closeStream( void )
8527 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8529 stream_.callbackInfo.isRunning = false;
8531 MUTEX_LOCK( &stream_.mutex );
8532 if ( stream_.state == STREAM_STOPPED ) {
8533 pah->runnable = true;
8534 pthread_cond_signal( &pah->runnable_cv );
8536 MUTEX_UNLOCK( &stream_.mutex );
8538 pthread_join( pah->thread, 0 );
8539 if ( pah->s_play ) {
8540 pa_simple_flush( pah->s_play, NULL );
8541 pa_simple_free( pah->s_play );
8544 pa_simple_free( pah->s_rec );
8546 pthread_cond_destroy( &pah->runnable_cv );
8548 stream_.apiHandle = 0;
8551 if ( stream_.userBuffer[0] ) {
8552 free( stream_.userBuffer[0] );
8553 stream_.userBuffer[0] = 0;
8555 if ( stream_.userBuffer[1] ) {
8556 free( stream_.userBuffer[1] );
8557 stream_.userBuffer[1] = 0;
8560 stream_.state = STREAM_CLOSED;
8561 stream_.mode = UNINITIALIZED;
8564 void RtApiPulse::callbackEvent( void )
8566 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8568 if ( stream_.state == STREAM_STOPPED ) {
8569 MUTEX_LOCK( &stream_.mutex );
8570 while ( !pah->runnable )
8571 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8573 if ( stream_.state != STREAM_RUNNING ) {
8574 MUTEX_UNLOCK( &stream_.mutex );
8577 MUTEX_UNLOCK( &stream_.mutex );
8580 if ( stream_.state == STREAM_CLOSED ) {
8581 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8582 "this shouldn't happen!";
8583 error( RtAudioError::WARNING );
8587 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8588 double streamTime = getStreamTime();
8589 RtAudioStreamStatus status = 0;
8590 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8591 stream_.bufferSize, streamTime, status,
8592 stream_.callbackInfo.userData );
8594 if ( doStopStream == 2 ) {
8599 MUTEX_LOCK( &stream_.mutex );
8600 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8601 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8603 if ( stream_.state != STREAM_RUNNING )
8608 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8609 if ( stream_.doConvertBuffer[OUTPUT] ) {
8610 convertBuffer( stream_.deviceBuffer,
8611 stream_.userBuffer[OUTPUT],
8612 stream_.convertInfo[OUTPUT] );
8613 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8614 formatBytes( stream_.deviceFormat[OUTPUT] );
8616 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8617 formatBytes( stream_.userFormat );
8619 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8620 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8621 pa_strerror( pa_error ) << ".";
8622 errorText_ = errorStream_.str();
8623 error( RtAudioError::WARNING );
8627 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8628 if ( stream_.doConvertBuffer[INPUT] )
8629 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8630 formatBytes( stream_.deviceFormat[INPUT] );
8632 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8633 formatBytes( stream_.userFormat );
8635 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8636 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8637 pa_strerror( pa_error ) << ".";
8638 errorText_ = errorStream_.str();
8639 error( RtAudioError::WARNING );
8641 if ( stream_.doConvertBuffer[INPUT] ) {
8642 convertBuffer( stream_.userBuffer[INPUT],
8643 stream_.deviceBuffer,
8644 stream_.convertInfo[INPUT] );
8649 MUTEX_UNLOCK( &stream_.mutex );
8650 RtApi::tickStreamTime();
8652 if ( doStopStream == 1 )
8656 void RtApiPulse::startStream( void )
8658 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8660 if ( stream_.state == STREAM_CLOSED ) {
8661 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8662 error( RtAudioError::INVALID_USE );
8665 if ( stream_.state == STREAM_RUNNING ) {
8666 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8667 error( RtAudioError::WARNING );
8671 MUTEX_LOCK( &stream_.mutex );
8673 #if defined( HAVE_GETTIMEOFDAY )
8674 gettimeofday( &stream_.lastTickTimestamp, NULL );
8677 stream_.state = STREAM_RUNNING;
8679 pah->runnable = true;
8680 pthread_cond_signal( &pah->runnable_cv );
8681 MUTEX_UNLOCK( &stream_.mutex );
8684 void RtApiPulse::stopStream( void )
8686 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8688 if ( stream_.state == STREAM_CLOSED ) {
8689 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8690 error( RtAudioError::INVALID_USE );
8693 if ( stream_.state == STREAM_STOPPED ) {
8694 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8695 error( RtAudioError::WARNING );
8699 stream_.state = STREAM_STOPPED;
8700 MUTEX_LOCK( &stream_.mutex );
8703 pah->runnable = false;
8704 if ( pah->s_play ) {
8706 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8707 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8708 pa_strerror( pa_error ) << ".";
8709 errorText_ = errorStream_.str();
8710 MUTEX_UNLOCK( &stream_.mutex );
8711 error( RtAudioError::SYSTEM_ERROR );
8717 stream_.state = STREAM_STOPPED;
8718 MUTEX_UNLOCK( &stream_.mutex );
8721 void RtApiPulse::abortStream( void )
8723 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8725 if ( stream_.state == STREAM_CLOSED ) {
8726 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8727 error( RtAudioError::INVALID_USE );
8730 if ( stream_.state == STREAM_STOPPED ) {
8731 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8732 error( RtAudioError::WARNING );
8736 stream_.state = STREAM_STOPPED;
8737 MUTEX_LOCK( &stream_.mutex );
8740 pah->runnable = false;
8741 if ( pah->s_play ) {
8743 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8744 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8745 pa_strerror( pa_error ) << ".";
8746 errorText_ = errorStream_.str();
8747 MUTEX_UNLOCK( &stream_.mutex );
8748 error( RtAudioError::SYSTEM_ERROR );
8754 stream_.state = STREAM_STOPPED;
8755 MUTEX_UNLOCK( &stream_.mutex );
8758 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8759 unsigned int channels, unsigned int firstChannel,
8760 unsigned int sampleRate, RtAudioFormat format,
8761 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8763 PulseAudioHandle *pah = 0;
8764 unsigned long bufferBytes = 0;
8767 if ( device != 0 ) return false;
8768 if ( mode != INPUT && mode != OUTPUT ) return false;
8769 if ( channels != 1 && channels != 2 ) {
8770 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8773 ss.channels = channels;
8775 if ( firstChannel != 0 ) return false;
8777 bool sr_found = false;
8778 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8779 if ( sampleRate == *sr ) {
8781 stream_.sampleRate = sampleRate;
8782 ss.rate = sampleRate;
8787 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8792 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8793 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8794 if ( format == sf->rtaudio_format ) {
8796 stream_.userFormat = sf->rtaudio_format;
8797 stream_.deviceFormat[mode] = stream_.userFormat;
8798 ss.format = sf->pa_format;
8802 if ( !sf_found ) { // Use internal data format conversion.
8803 stream_.userFormat = format;
8804 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8805 ss.format = PA_SAMPLE_FLOAT32LE;
8808 // Set other stream parameters.
8809 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8810 else stream_.userInterleaved = true;
8811 stream_.deviceInterleaved[mode] = true;
8812 stream_.nBuffers = 1;
8813 stream_.doByteSwap[mode] = false;
8814 stream_.nUserChannels[mode] = channels;
8815 stream_.nDeviceChannels[mode] = channels + firstChannel;
8816 stream_.channelOffset[mode] = 0;
8817 std::string streamName = "RtAudio";
8819 // Set flags for buffer conversion.
8820 stream_.doConvertBuffer[mode] = false;
8821 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8822 stream_.doConvertBuffer[mode] = true;
8823 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8824 stream_.doConvertBuffer[mode] = true;
8825 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] )
8826 stream_.doConvertBuffer[mode] = true;
8828 // Allocate necessary internal buffers.
8829 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8830 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8831 if ( stream_.userBuffer[mode] == NULL ) {
8832 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8835 stream_.bufferSize = *bufferSize;
8837 if ( stream_.doConvertBuffer[mode] ) {
8839 bool makeBuffer = true;
8840 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8841 if ( mode == INPUT ) {
8842 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8843 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8844 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8849 bufferBytes *= *bufferSize;
8850 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8851 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8852 if ( stream_.deviceBuffer == NULL ) {
8853 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8859 stream_.device[mode] = device;
8861 // Setup the buffer conversion information structure.
8862 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8864 if ( !stream_.apiHandle ) {
8865 PulseAudioHandle *pah = new PulseAudioHandle;
8867 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8871 stream_.apiHandle = pah;
8872 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8873 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8877 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8880 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8883 pa_buffer_attr buffer_attr;
8884 buffer_attr.fragsize = bufferBytes;
8885 buffer_attr.maxlength = -1;
8887 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8888 if ( !pah->s_rec ) {
8889 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8894 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8895 if ( !pah->s_play ) {
8896 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8904 if ( stream_.mode == UNINITIALIZED )
8905 stream_.mode = mode;
8906 else if ( stream_.mode == mode )
8909 stream_.mode = DUPLEX;
8911 if ( !stream_.callbackInfo.isRunning ) {
8912 stream_.callbackInfo.object = this;
8914 stream_.state = STREAM_STOPPED;
8915 // Set the thread attributes for joinable and realtime scheduling
8916 // priority (optional). The higher priority will only take affect
8917 // if the program is run as root or suid. Note, under Linux
8918 // processes with CAP_SYS_NICE privilege, a user can change
8919 // scheduling policy and priority (thus need not be root). See
8920 // POSIX "capabilities".
8921 pthread_attr_t attr;
8922 pthread_attr_init( &attr );
8923 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8924 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8925 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8926 stream_.callbackInfo.doRealtime = true;
8927 struct sched_param param;
8928 int priority = options->priority;
8929 int min = sched_get_priority_min( SCHED_RR );
8930 int max = sched_get_priority_max( SCHED_RR );
8931 if ( priority < min ) priority = min;
8932 else if ( priority > max ) priority = max;
8933 param.sched_priority = priority;
8935 // Set the policy BEFORE the priority. Otherwise it fails.
8936 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8937 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8938 // This is definitely required. Otherwise it fails.
8939 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8940 pthread_attr_setschedparam(&attr, ¶m);
8943 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8945 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8948 stream_.callbackInfo.isRunning = true;
8949 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8950 pthread_attr_destroy(&attr);
8952 // Failed. Try instead with default attributes.
8953 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8955 stream_.callbackInfo.isRunning = false;
8956 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8965 if ( pah && stream_.callbackInfo.isRunning ) {
8966 pthread_cond_destroy( &pah->runnable_cv );
8968 stream_.apiHandle = 0;
8971 for ( int i=0; i<2; i++ ) {
8972 if ( stream_.userBuffer[i] ) {
8973 free( stream_.userBuffer[i] );
8974 stream_.userBuffer[i] = 0;
8978 if ( stream_.deviceBuffer ) {
8979 free( stream_.deviceBuffer );
8980 stream_.deviceBuffer = 0;
8983 stream_.state = STREAM_CLOSED;
8987 //******************** End of __LINUX_PULSE__ *********************//
8990 #if defined(__LINUX_OSS__)
8993 #include <sys/ioctl.h>
8996 #include <sys/soundcard.h>
9000 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (playback = 0, record = 1); 0 means "not open"
  bool xrun[2];            // over/underflow flags reported to the user callback
  bool triggered;          // duplex trigger state (see callbackEvent)
  pthread_cond_t runnable; // signaled by startStream() to wake the callback thread

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
9014 RtApiOss :: RtApiOss()
9016 // Nothing to do here.
9019 RtApiOss :: ~RtApiOss()
9021 if ( stream_.state != STREAM_CLOSED ) closeStream();
9024 unsigned int RtApiOss :: getDeviceCount( void )
9026 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9027 if ( mixerfd == -1 ) {
9028 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9029 error( RtAudioError::WARNING );
9033 oss_sysinfo sysinfo;
9034 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9036 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9037 error( RtAudioError::WARNING );
9042 return sysinfo.numaudios;
9045 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9047 RtAudio::DeviceInfo info;
9048 info.probed = false;
9050 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9051 if ( mixerfd == -1 ) {
9052 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9053 error( RtAudioError::WARNING );
9057 oss_sysinfo sysinfo;
9058 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9059 if ( result == -1 ) {
9061 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9062 error( RtAudioError::WARNING );
9066 unsigned nDevices = sysinfo.numaudios;
9067 if ( nDevices == 0 ) {
9069 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9070 error( RtAudioError::INVALID_USE );
9074 if ( device >= nDevices ) {
9076 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9077 error( RtAudioError::INVALID_USE );
9081 oss_audioinfo ainfo;
9083 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9085 if ( result == -1 ) {
9086 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9087 errorText_ = errorStream_.str();
9088 error( RtAudioError::WARNING );
9093 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9094 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9095 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9096 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9097 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9100 // Probe data formats ... do for input
9101 unsigned long mask = ainfo.iformats;
9102 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9103 info.nativeFormats |= RTAUDIO_SINT16;
9104 if ( mask & AFMT_S8 )
9105 info.nativeFormats |= RTAUDIO_SINT8;
9106 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9107 info.nativeFormats |= RTAUDIO_SINT32;
9109 if ( mask & AFMT_FLOAT )
9110 info.nativeFormats |= RTAUDIO_FLOAT32;
9112 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9113 info.nativeFormats |= RTAUDIO_SINT24;
9115 // Check that we have at least one supported format
9116 if ( info.nativeFormats == 0 ) {
9117 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9118 errorText_ = errorStream_.str();
9119 error( RtAudioError::WARNING );
9123 // Probe the supported sample rates.
9124 info.sampleRates.clear();
9125 if ( ainfo.nrates ) {
9126 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9127 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9128 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9129 info.sampleRates.push_back( SAMPLE_RATES[k] );
9131 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9132 info.preferredSampleRate = SAMPLE_RATES[k];
9140 // Check min and max rate values;
9141 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9142 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9143 info.sampleRates.push_back( SAMPLE_RATES[k] );
9145 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9146 info.preferredSampleRate = SAMPLE_RATES[k];
9151 if ( info.sampleRates.size() == 0 ) {
9152 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9153 errorText_ = errorStream_.str();
9154 error( RtAudioError::WARNING );
9158 info.name = ainfo.name;
9165 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9166 unsigned int firstChannel, unsigned int sampleRate,
9167 RtAudioFormat format, unsigned int *bufferSize,
9168 RtAudio::StreamOptions *options )
9170 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9171 if ( mixerfd == -1 ) {
9172 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9176 oss_sysinfo sysinfo;
9177 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9178 if ( result == -1 ) {
9180 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9184 unsigned nDevices = sysinfo.numaudios;
9185 if ( nDevices == 0 ) {
9186 // This should not happen because a check is made before this function is called.
9188 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9192 if ( device >= nDevices ) {
9193 // This should not happen because a check is made before this function is called.
9195 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9199 oss_audioinfo ainfo;
9201 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9203 if ( result == -1 ) {
9204 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9205 errorText_ = errorStream_.str();
9209 // Check if device supports input or output
9210 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9211 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9212 if ( mode == OUTPUT )
9213 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9215 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9216 errorText_ = errorStream_.str();
9221 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9222 if ( mode == OUTPUT )
9224 else { // mode == INPUT
9225 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9226 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9227 close( handle->id[0] );
9229 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9230 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9231 errorText_ = errorStream_.str();
9234 // Check that the number previously set channels is the same.
9235 if ( stream_.nUserChannels[0] != channels ) {
9236 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9237 errorText_ = errorStream_.str();
9246 // Set exclusive access if specified.
9247 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9249 // Try to open the device.
9251 fd = open( ainfo.devnode, flags, 0 );
9253 if ( errno == EBUSY )
9254 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9256 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9257 errorText_ = errorStream_.str();
9261 // For duplex operation, specifically set this mode (this doesn't seem to work).
9263 if ( flags | O_RDWR ) {
9264 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9265 if ( result == -1) {
9266 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9267 errorText_ = errorStream_.str();
9273 // Check the device channel support.
9274 stream_.nUserChannels[mode] = channels;
9275 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9277 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9278 errorText_ = errorStream_.str();
9282 // Set the number of channels.
9283 int deviceChannels = channels + firstChannel;
9284 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9285 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9287 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9288 errorText_ = errorStream_.str();
9291 stream_.nDeviceChannels[mode] = deviceChannels;
9293 // Get the data format mask
9295 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9296 if ( result == -1 ) {
9298 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9299 errorText_ = errorStream_.str();
9303 // Determine how to set the device format.
9304 stream_.userFormat = format;
9305 int deviceFormat = -1;
9306 stream_.doByteSwap[mode] = false;
9307 if ( format == RTAUDIO_SINT8 ) {
9308 if ( mask & AFMT_S8 ) {
9309 deviceFormat = AFMT_S8;
9310 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9313 else if ( format == RTAUDIO_SINT16 ) {
9314 if ( mask & AFMT_S16_NE ) {
9315 deviceFormat = AFMT_S16_NE;
9316 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9318 else if ( mask & AFMT_S16_OE ) {
9319 deviceFormat = AFMT_S16_OE;
9320 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9321 stream_.doByteSwap[mode] = true;
9324 else if ( format == RTAUDIO_SINT24 ) {
9325 if ( mask & AFMT_S24_NE ) {
9326 deviceFormat = AFMT_S24_NE;
9327 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9329 else if ( mask & AFMT_S24_OE ) {
9330 deviceFormat = AFMT_S24_OE;
9331 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9332 stream_.doByteSwap[mode] = true;
9335 else if ( format == RTAUDIO_SINT32 ) {
9336 if ( mask & AFMT_S32_NE ) {
9337 deviceFormat = AFMT_S32_NE;
9338 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9340 else if ( mask & AFMT_S32_OE ) {
9341 deviceFormat = AFMT_S32_OE;
9342 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9343 stream_.doByteSwap[mode] = true;
9347 if ( deviceFormat == -1 ) {
9348 // The user requested format is not natively supported by the device.
9349 if ( mask & AFMT_S16_NE ) {
9350 deviceFormat = AFMT_S16_NE;
9351 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9353 else if ( mask & AFMT_S32_NE ) {
9354 deviceFormat = AFMT_S32_NE;
9355 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9357 else if ( mask & AFMT_S24_NE ) {
9358 deviceFormat = AFMT_S24_NE;
9359 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9361 else if ( mask & AFMT_S16_OE ) {
9362 deviceFormat = AFMT_S16_OE;
9363 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9364 stream_.doByteSwap[mode] = true;
9366 else if ( mask & AFMT_S32_OE ) {
9367 deviceFormat = AFMT_S32_OE;
9368 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9369 stream_.doByteSwap[mode] = true;
9371 else if ( mask & AFMT_S24_OE ) {
9372 deviceFormat = AFMT_S24_OE;
9373 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9374 stream_.doByteSwap[mode] = true;
9376 else if ( mask & AFMT_S8) {
9377 deviceFormat = AFMT_S8;
9378 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9382 if ( stream_.deviceFormat[mode] == 0 ) {
9383 // This really shouldn't happen ...
9385 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9386 errorText_ = errorStream_.str();
9390 // Set the data format.
9391 int temp = deviceFormat;
9392 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9393 if ( result == -1 || deviceFormat != temp ) {
9395 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9396 errorText_ = errorStream_.str();
9400 // Attempt to set the buffer size. According to OSS, the minimum
9401 // number of buffers is two. The supposed minimum buffer size is 16
9402 // bytes, so that will be our lower bound. The argument to this
9403 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9404 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9405 // We'll check the actual value used near the end of the setup
9407 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9408 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9410 if ( options ) buffers = options->numberOfBuffers;
9411 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9412 if ( buffers < 2 ) buffers = 3;
9413 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9414 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9415 if ( result == -1 ) {
9417 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9418 errorText_ = errorStream_.str();
9421 stream_.nBuffers = buffers;
9423 // Save buffer size (in sample frames).
9424 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9425 stream_.bufferSize = *bufferSize;
9427 // Set the sample rate.
9428 int srate = sampleRate;
9429 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9430 if ( result == -1 ) {
9432 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9433 errorText_ = errorStream_.str();
9437 // Verify the sample rate setup worked.
9438 if ( abs( srate - (int)sampleRate ) > 100 ) {
9440 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9441 errorText_ = errorStream_.str();
9444 stream_.sampleRate = sampleRate;
9446 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9447 // We're doing duplex setup here.
9448 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9449 stream_.nDeviceChannels[0] = deviceChannels;
9452 // Set interleaving parameters.
9453 stream_.userInterleaved = true;
9454 stream_.deviceInterleaved[mode] = true;
9455 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9456 stream_.userInterleaved = false;
9458 // Set flags for buffer conversion
9459 stream_.doConvertBuffer[mode] = false;
9460 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9461 stream_.doConvertBuffer[mode] = true;
9462 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9463 stream_.doConvertBuffer[mode] = true;
9464 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9465 stream_.nUserChannels[mode] > 1 )
9466 stream_.doConvertBuffer[mode] = true;
9468 // Allocate the stream handles if necessary and then save.
9469 if ( stream_.apiHandle == 0 ) {
9471 handle = new OssHandle;
9473 catch ( std::bad_alloc& ) {
9474 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9478 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9479 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9483 stream_.apiHandle = (void *) handle;
9486 handle = (OssHandle *) stream_.apiHandle;
9488 handle->id[mode] = fd;
9490 // Allocate necessary internal buffers.
9491 unsigned long bufferBytes;
9492 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9493 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9494 if ( stream_.userBuffer[mode] == NULL ) {
9495 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9499 if ( stream_.doConvertBuffer[mode] ) {
9501 bool makeBuffer = true;
9502 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9503 if ( mode == INPUT ) {
9504 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9505 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9506 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9511 bufferBytes *= *bufferSize;
9512 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9513 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9514 if ( stream_.deviceBuffer == NULL ) {
9515 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9521 stream_.device[mode] = device;
9522 stream_.state = STREAM_STOPPED;
9524 // Setup the buffer conversion information structure.
9525 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9527 // Setup thread if necessary.
9528 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9529 // We had already set up an output stream.
9530 stream_.mode = DUPLEX;
9531 if ( stream_.device[0] == device ) handle->id[0] = fd;
9534 stream_.mode = mode;
9536 // Setup callback thread.
9537 stream_.callbackInfo.object = (void *) this;
9539 // Set the thread attributes for joinable and realtime scheduling
9540 // priority. The higher priority will only take affect if the
9541 // program is run as root or suid.
9542 pthread_attr_t attr;
9543 pthread_attr_init( &attr );
9544 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9545 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9546 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9547 stream_.callbackInfo.doRealtime = true;
9548 struct sched_param param;
9549 int priority = options->priority;
9550 int min = sched_get_priority_min( SCHED_RR );
9551 int max = sched_get_priority_max( SCHED_RR );
9552 if ( priority < min ) priority = min;
9553 else if ( priority > max ) priority = max;
9554 param.sched_priority = priority;
9556 // Set the policy BEFORE the priority. Otherwise it fails.
9557 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9558 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9559 // This is definitely required. Otherwise it fails.
9560 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9561 pthread_attr_setschedparam(&attr, ¶m);
9564 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9566 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9569 stream_.callbackInfo.isRunning = true;
9570 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9571 pthread_attr_destroy( &attr );
9573 // Failed. Try instead with default attributes.
9574 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9576 stream_.callbackInfo.isRunning = false;
9577 errorText_ = "RtApiOss::error creating callback thread!";
9587 pthread_cond_destroy( &handle->runnable );
9588 if ( handle->id[0] ) close( handle->id[0] );
9589 if ( handle->id[1] ) close( handle->id[1] );
9591 stream_.apiHandle = 0;
9594 for ( int i=0; i<2; i++ ) {
9595 if ( stream_.userBuffer[i] ) {
9596 free( stream_.userBuffer[i] );
9597 stream_.userBuffer[i] = 0;
9601 if ( stream_.deviceBuffer ) {
9602 free( stream_.deviceBuffer );
9603 stream_.deviceBuffer = 0;
9606 stream_.state = STREAM_CLOSED;
9610 void RtApiOss :: closeStream()
9612 if ( stream_.state == STREAM_CLOSED ) {
9613 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9614 error( RtAudioError::WARNING );
9618 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9619 stream_.callbackInfo.isRunning = false;
9620 MUTEX_LOCK( &stream_.mutex );
9621 if ( stream_.state == STREAM_STOPPED )
9622 pthread_cond_signal( &handle->runnable );
9623 MUTEX_UNLOCK( &stream_.mutex );
9624 pthread_join( stream_.callbackInfo.thread, NULL );
9626 if ( stream_.state == STREAM_RUNNING ) {
9627 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9628 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9630 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9631 stream_.state = STREAM_STOPPED;
9635 pthread_cond_destroy( &handle->runnable );
9636 if ( handle->id[0] ) close( handle->id[0] );
9637 if ( handle->id[1] ) close( handle->id[1] );
9639 stream_.apiHandle = 0;
9642 for ( int i=0; i<2; i++ ) {
9643 if ( stream_.userBuffer[i] ) {
9644 free( stream_.userBuffer[i] );
9645 stream_.userBuffer[i] = 0;
9649 if ( stream_.deviceBuffer ) {
9650 free( stream_.deviceBuffer );
9651 stream_.deviceBuffer = 0;
9654 stream_.mode = UNINITIALIZED;
9655 stream_.state = STREAM_CLOSED;
9658 void RtApiOss :: startStream()
9661 if ( stream_.state == STREAM_RUNNING ) {
9662 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9663 error( RtAudioError::WARNING );
9667 MUTEX_LOCK( &stream_.mutex );
9669 #if defined( HAVE_GETTIMEOFDAY )
9670 gettimeofday( &stream_.lastTickTimestamp, NULL );
9673 stream_.state = STREAM_RUNNING;
9675 // No need to do anything else here ... OSS automatically starts
9676 // when fed samples.
9678 MUTEX_UNLOCK( &stream_.mutex );
9680 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9681 pthread_cond_signal( &handle->runnable );
9684 void RtApiOss :: stopStream()
9687 if ( stream_.state == STREAM_STOPPED ) {
9688 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9689 error( RtAudioError::WARNING );
9693 MUTEX_LOCK( &stream_.mutex );
9695 // The state might change while waiting on a mutex.
9696 if ( stream_.state == STREAM_STOPPED ) {
9697 MUTEX_UNLOCK( &stream_.mutex );
9702 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9703 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9705 // Flush the output with zeros a few times.
9708 RtAudioFormat format;
9710 if ( stream_.doConvertBuffer[0] ) {
9711 buffer = stream_.deviceBuffer;
9712 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9713 format = stream_.deviceFormat[0];
9716 buffer = stream_.userBuffer[0];
9717 samples = stream_.bufferSize * stream_.nUserChannels[0];
9718 format = stream_.userFormat;
9721 memset( buffer, 0, samples * formatBytes(format) );
9722 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9723 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9724 if ( result == -1 ) {
9725 errorText_ = "RtApiOss::stopStream: audio write error.";
9726 error( RtAudioError::WARNING );
9730 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9731 if ( result == -1 ) {
9732 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9733 errorText_ = errorStream_.str();
9736 handle->triggered = false;
9739 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9740 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9741 if ( result == -1 ) {
9742 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9743 errorText_ = errorStream_.str();
9749 stream_.state = STREAM_STOPPED;
9750 MUTEX_UNLOCK( &stream_.mutex );
9752 if ( result != -1 ) return;
9753 error( RtAudioError::SYSTEM_ERROR );
9756 void RtApiOss :: abortStream()
9759 if ( stream_.state == STREAM_STOPPED ) {
9760 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9761 error( RtAudioError::WARNING );
9765 MUTEX_LOCK( &stream_.mutex );
9767 // The state might change while waiting on a mutex.
9768 if ( stream_.state == STREAM_STOPPED ) {
9769 MUTEX_UNLOCK( &stream_.mutex );
9774 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9775 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9776 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9777 if ( result == -1 ) {
9778 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9779 errorText_ = errorStream_.str();
9782 handle->triggered = false;
9785 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9786 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9787 if ( result == -1 ) {
9788 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9789 errorText_ = errorStream_.str();
9795 stream_.state = STREAM_STOPPED;
9796 MUTEX_UNLOCK( &stream_.mutex );
9798 if ( result != -1 ) return;
9799 error( RtAudioError::SYSTEM_ERROR );
9802 void RtApiOss :: callbackEvent()
9804 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9805 if ( stream_.state == STREAM_STOPPED ) {
9806 MUTEX_LOCK( &stream_.mutex );
9807 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9808 if ( stream_.state != STREAM_RUNNING ) {
9809 MUTEX_UNLOCK( &stream_.mutex );
9812 MUTEX_UNLOCK( &stream_.mutex );
9815 if ( stream_.state == STREAM_CLOSED ) {
9816 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9817 error( RtAudioError::WARNING );
9821 // Invoke user callback to get fresh output data.
9822 int doStopStream = 0;
9823 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9824 double streamTime = getStreamTime();
9825 RtAudioStreamStatus status = 0;
9826 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9827 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9828 handle->xrun[0] = false;
9830 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9831 status |= RTAUDIO_INPUT_OVERFLOW;
9832 handle->xrun[1] = false;
9834 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9835 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9836 if ( doStopStream == 2 ) {
9837 this->abortStream();
9841 MUTEX_LOCK( &stream_.mutex );
9843 // The state might change while waiting on a mutex.
9844 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9849 RtAudioFormat format;
9851 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9853 // Setup parameters and do buffer conversion if necessary.
9854 if ( stream_.doConvertBuffer[0] ) {
9855 buffer = stream_.deviceBuffer;
9856 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9857 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9858 format = stream_.deviceFormat[0];
9861 buffer = stream_.userBuffer[0];
9862 samples = stream_.bufferSize * stream_.nUserChannels[0];
9863 format = stream_.userFormat;
9866 // Do byte swapping if necessary.
9867 if ( stream_.doByteSwap[0] )
9868 byteSwapBuffer( buffer, samples, format );
9870 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9872 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9873 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9874 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9875 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9876 handle->triggered = true;
9879 // Write samples to device.
9880 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9882 if ( result == -1 ) {
9883 // We'll assume this is an underrun, though there isn't a
9884 // specific means for determining that.
9885 handle->xrun[0] = true;
9886 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9887 error( RtAudioError::WARNING );
9888 // Continue on to input section.
9892 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9894 // Setup parameters.
9895 if ( stream_.doConvertBuffer[1] ) {
9896 buffer = stream_.deviceBuffer;
9897 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9898 format = stream_.deviceFormat[1];
9901 buffer = stream_.userBuffer[1];
9902 samples = stream_.bufferSize * stream_.nUserChannels[1];
9903 format = stream_.userFormat;
9906 // Read samples from device.
9907 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9909 if ( result == -1 ) {
9910 // We'll assume this is an overrun, though there isn't a
9911 // specific means for determining that.
9912 handle->xrun[1] = true;
9913 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9914 error( RtAudioError::WARNING );
9918 // Do byte swapping if necessary.
9919 if ( stream_.doByteSwap[1] )
9920 byteSwapBuffer( buffer, samples, format );
9922 // Do buffer conversion if necessary.
9923 if ( stream_.doConvertBuffer[1] )
9924 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9928 MUTEX_UNLOCK( &stream_.mutex );
9930 RtApi::tickStreamTime();
9931 if ( doStopStream == 1 ) this->stopStream();
9934 static void *ossCallbackHandler( void *ptr )
9936 CallbackInfo *info = (CallbackInfo *) ptr;
9937 RtApiOss *object = (RtApiOss *) info->object;
9938 bool *isRunning = &info->isRunning;
9940 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9941 if (info->doRealtime) {
9942 std::cerr << "RtAudio oss: " <<
9943 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9944 "running realtime scheduling" << std::endl;
9948 while ( *isRunning == true ) {
9949 pthread_testcancel();
9950 object->callbackEvent();
9953 pthread_exit( NULL );
9956 //******************** End of __LINUX_OSS__ *********************//
9960 // *************************************************** //
9962 // Protected common (OS-independent) RtAudio methods.
9964 // *************************************************** //
9966 // This method can be modified to control the behavior of error
9967 // message printing.
9968 void RtApi :: error( RtAudioError::Type type )
9970 errorStream_.str(""); // clear the ostringstream
9972 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9973 if ( errorCallback ) {
9974 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9976 if ( firstErrorOccurred_ )
9979 firstErrorOccurred_ = true;
9980 const std::string errorMessage = errorText_;
9982 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9983 stream_.callbackInfo.isRunning = false; // exit from the thread
9987 errorCallback( type, errorMessage );
9988 firstErrorOccurred_ = false;
9992 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9993 std::cerr << '\n' << errorText_ << "\n\n";
9994 else if ( type != RtAudioError::WARNING )
9995 throw( RtAudioError( errorText_, type ) );
9998 void RtApi :: verifyStream()
10000 if ( stream_.state == STREAM_CLOSED ) {
10001 errorText_ = "RtApi:: a stream is not open!";
10002 error( RtAudioError::INVALID_USE );
10006 void RtApi :: clearStreamInfo()
10008 stream_.mode = UNINITIALIZED;
10009 stream_.state = STREAM_CLOSED;
10010 stream_.sampleRate = 0;
10011 stream_.bufferSize = 0;
10012 stream_.nBuffers = 0;
10013 stream_.userFormat = 0;
10014 stream_.userInterleaved = true;
10015 stream_.streamTime = 0.0;
10016 stream_.apiHandle = 0;
10017 stream_.deviceBuffer = 0;
10018 stream_.callbackInfo.callback = 0;
10019 stream_.callbackInfo.userData = 0;
10020 stream_.callbackInfo.isRunning = false;
10021 stream_.callbackInfo.errorCallback = 0;
10022 for ( int i=0; i<2; i++ ) {
10023 stream_.device[i] = 11111;
10024 stream_.doConvertBuffer[i] = false;
10025 stream_.deviceInterleaved[i] = true;
10026 stream_.doByteSwap[i] = false;
10027 stream_.nUserChannels[i] = 0;
10028 stream_.nDeviceChannels[i] = 0;
10029 stream_.channelOffset[i] = 0;
10030 stream_.deviceFormat[i] = 0;
10031 stream_.latency[i] = 0;
10032 stream_.userBuffer[i] = 0;
10033 stream_.convertInfo[i].channels = 0;
10034 stream_.convertInfo[i].inJump = 0;
10035 stream_.convertInfo[i].outJump = 0;
10036 stream_.convertInfo[i].inFormat = 0;
10037 stream_.convertInfo[i].outFormat = 0;
10038 stream_.convertInfo[i].inOffset.clear();
10039 stream_.convertInfo[i].outOffset.clear();
10043 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10045 if ( format == RTAUDIO_SINT16 )
10047 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10049 else if ( format == RTAUDIO_FLOAT64 )
10051 else if ( format == RTAUDIO_SINT24 )
10053 else if ( format == RTAUDIO_SINT8 )
10056 errorText_ = "RtApi::formatBytes: undefined format.";
10057 error( RtAudioError::WARNING );
10062 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10064 if ( mode == INPUT ) { // convert device to user buffer
10065 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10066 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10067 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10068 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10070 else { // convert user to device buffer
10071 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10072 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10073 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10074 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10077 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10078 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10080 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10082 // Set up the interleave/deinterleave offsets.
10083 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10084 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10085 ( mode == INPUT && stream_.userInterleaved ) ) {
10086 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10087 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10088 stream_.convertInfo[mode].outOffset.push_back( k );
10089 stream_.convertInfo[mode].inJump = 1;
10093 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10094 stream_.convertInfo[mode].inOffset.push_back( k );
10095 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10096 stream_.convertInfo[mode].outJump = 1;
10100 else { // no (de)interleaving
10101 if ( stream_.userInterleaved ) {
10102 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10103 stream_.convertInfo[mode].inOffset.push_back( k );
10104 stream_.convertInfo[mode].outOffset.push_back( k );
10108 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10109 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10110 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10111 stream_.convertInfo[mode].inJump = 1;
10112 stream_.convertInfo[mode].outJump = 1;
10117 // Add channel offset.
10118 if ( firstChannel > 0 ) {
10119 if ( stream_.deviceInterleaved[mode] ) {
10120 if ( mode == OUTPUT ) {
10121 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10122 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10125 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10126 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10130 if ( mode == OUTPUT ) {
10131 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10132 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10135 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10136 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10142 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10144 // This function does format conversion, input/output channel compensation, and
10145 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10146 // the lower three bytes of a 32-bit integer.
10148 // Clear our device buffer when in/out duplex device channels are different
10149 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10150 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10151 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10154 if (info.outFormat == RTAUDIO_FLOAT64) {
10156 Float64 *out = (Float64 *)outBuffer;
10158 if (info.inFormat == RTAUDIO_SINT8) {
10159 signed char *in = (signed char *)inBuffer;
10160 scale = 1.0 / 127.5;
10161 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10162 for (j=0; j<info.channels; j++) {
10163 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10164 out[info.outOffset[j]] += 0.5;
10165 out[info.outOffset[j]] *= scale;
10168 out += info.outJump;
10171 else if (info.inFormat == RTAUDIO_SINT16) {
10172 Int16 *in = (Int16 *)inBuffer;
10173 scale = 1.0 / 32767.5;
10174 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10175 for (j=0; j<info.channels; j++) {
10176 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10177 out[info.outOffset[j]] += 0.5;
10178 out[info.outOffset[j]] *= scale;
10181 out += info.outJump;
10184 else if (info.inFormat == RTAUDIO_SINT24) {
10185 Int24 *in = (Int24 *)inBuffer;
10186 scale = 1.0 / 8388607.5;
10187 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10188 for (j=0; j<info.channels; j++) {
10189 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10190 out[info.outOffset[j]] += 0.5;
10191 out[info.outOffset[j]] *= scale;
10194 out += info.outJump;
10197 else if (info.inFormat == RTAUDIO_SINT32) {
10198 Int32 *in = (Int32 *)inBuffer;
10199 scale = 1.0 / 2147483647.5;
10200 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10201 for (j=0; j<info.channels; j++) {
10202 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10203 out[info.outOffset[j]] += 0.5;
10204 out[info.outOffset[j]] *= scale;
10207 out += info.outJump;
10210 else if (info.inFormat == RTAUDIO_FLOAT32) {
10211 Float32 *in = (Float32 *)inBuffer;
10212 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10213 for (j=0; j<info.channels; j++) {
10214 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10217 out += info.outJump;
10220 else if (info.inFormat == RTAUDIO_FLOAT64) {
10221 // Channel compensation and/or (de)interleaving only.
10222 Float64 *in = (Float64 *)inBuffer;
10223 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10224 for (j=0; j<info.channels; j++) {
10225 out[info.outOffset[j]] = in[info.inOffset[j]];
10228 out += info.outJump;
10232 else if (info.outFormat == RTAUDIO_FLOAT32) {
10234 Float32 *out = (Float32 *)outBuffer;
10236 if (info.inFormat == RTAUDIO_SINT8) {
10237 signed char *in = (signed char *)inBuffer;
10238 scale = (Float32) ( 1.0 / 127.5 );
10239 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10240 for (j=0; j<info.channels; j++) {
10241 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10242 out[info.outOffset[j]] += 0.5;
10243 out[info.outOffset[j]] *= scale;
10246 out += info.outJump;
10249 else if (info.inFormat == RTAUDIO_SINT16) {
10250 Int16 *in = (Int16 *)inBuffer;
10251 scale = (Float32) ( 1.0 / 32767.5 );
10252 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10253 for (j=0; j<info.channels; j++) {
10254 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10255 out[info.outOffset[j]] += 0.5;
10256 out[info.outOffset[j]] *= scale;
10259 out += info.outJump;
10262 else if (info.inFormat == RTAUDIO_SINT24) {
10263 Int24 *in = (Int24 *)inBuffer;
10264 scale = (Float32) ( 1.0 / 8388607.5 );
10265 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10266 for (j=0; j<info.channels; j++) {
10267 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10268 out[info.outOffset[j]] += 0.5;
10269 out[info.outOffset[j]] *= scale;
10272 out += info.outJump;
10275 else if (info.inFormat == RTAUDIO_SINT32) {
10276 Int32 *in = (Int32 *)inBuffer;
10277 scale = (Float32) ( 1.0 / 2147483647.5 );
10278 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10279 for (j=0; j<info.channels; j++) {
10280 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10281 out[info.outOffset[j]] += 0.5;
10282 out[info.outOffset[j]] *= scale;
10285 out += info.outJump;
10288 else if (info.inFormat == RTAUDIO_FLOAT32) {
10289 // Channel compensation and/or (de)interleaving only.
10290 Float32 *in = (Float32 *)inBuffer;
10291 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10292 for (j=0; j<info.channels; j++) {
10293 out[info.outOffset[j]] = in[info.inOffset[j]];
10296 out += info.outJump;
10299 else if (info.inFormat == RTAUDIO_FLOAT64) {
10300 Float64 *in = (Float64 *)inBuffer;
10301 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10302 for (j=0; j<info.channels; j++) {
10303 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10306 out += info.outJump;
10310 else if (info.outFormat == RTAUDIO_SINT32) {
10311 Int32 *out = (Int32 *)outBuffer;
10312 if (info.inFormat == RTAUDIO_SINT8) {
10313 signed char *in = (signed char *)inBuffer;
10314 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10315 for (j=0; j<info.channels; j++) {
10316 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10317 out[info.outOffset[j]] <<= 24;
10320 out += info.outJump;
10323 else if (info.inFormat == RTAUDIO_SINT16) {
10324 Int16 *in = (Int16 *)inBuffer;
10325 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10326 for (j=0; j<info.channels; j++) {
10327 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10328 out[info.outOffset[j]] <<= 16;
10331 out += info.outJump;
10334 else if (info.inFormat == RTAUDIO_SINT24) {
10335 Int24 *in = (Int24 *)inBuffer;
10336 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10337 for (j=0; j<info.channels; j++) {
10338 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10339 out[info.outOffset[j]] <<= 8;
10342 out += info.outJump;
10345 else if (info.inFormat == RTAUDIO_SINT32) {
10346 // Channel compensation and/or (de)interleaving only.
10347 Int32 *in = (Int32 *)inBuffer;
10348 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10349 for (j=0; j<info.channels; j++) {
10350 out[info.outOffset[j]] = in[info.inOffset[j]];
10353 out += info.outJump;
10356 else if (info.inFormat == RTAUDIO_FLOAT32) {
10357 Float32 *in = (Float32 *)inBuffer;
10358 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10359 for (j=0; j<info.channels; j++) {
10360 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10363 out += info.outJump;
10366 else if (info.inFormat == RTAUDIO_FLOAT64) {
10367 Float64 *in = (Float64 *)inBuffer;
10368 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10369 for (j=0; j<info.channels; j++) {
10370 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10373 out += info.outJump;
10377 else if (info.outFormat == RTAUDIO_SINT24) {
10378 Int24 *out = (Int24 *)outBuffer;
10379 if (info.inFormat == RTAUDIO_SINT8) {
10380 signed char *in = (signed char *)inBuffer;
10381 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10382 for (j=0; j<info.channels; j++) {
10383 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10384 //out[info.outOffset[j]] <<= 16;
10387 out += info.outJump;
10390 else if (info.inFormat == RTAUDIO_SINT16) {
10391 Int16 *in = (Int16 *)inBuffer;
10392 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10393 for (j=0; j<info.channels; j++) {
10394 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10395 //out[info.outOffset[j]] <<= 8;
10398 out += info.outJump;
10401 else if (info.inFormat == RTAUDIO_SINT24) {
10402 // Channel compensation and/or (de)interleaving only.
10403 Int24 *in = (Int24 *)inBuffer;
10404 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10405 for (j=0; j<info.channels; j++) {
10406 out[info.outOffset[j]] = in[info.inOffset[j]];
10409 out += info.outJump;
10412 else if (info.inFormat == RTAUDIO_SINT32) {
10413 Int32 *in = (Int32 *)inBuffer;
10414 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10415 for (j=0; j<info.channels; j++) {
10416 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10417 //out[info.outOffset[j]] >>= 8;
10420 out += info.outJump;
10423 else if (info.inFormat == RTAUDIO_FLOAT32) {
10424 Float32 *in = (Float32 *)inBuffer;
10425 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10426 for (j=0; j<info.channels; j++) {
10427 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10430 out += info.outJump;
10433 else if (info.inFormat == RTAUDIO_FLOAT64) {
10434 Float64 *in = (Float64 *)inBuffer;
10435 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10436 for (j=0; j<info.channels; j++) {
10437 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10440 out += info.outJump;
10444 else if (info.outFormat == RTAUDIO_SINT16) {
10445 Int16 *out = (Int16 *)outBuffer;
10446 if (info.inFormat == RTAUDIO_SINT8) {
10447 signed char *in = (signed char *)inBuffer;
10448 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10449 for (j=0; j<info.channels; j++) {
10450 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10451 out[info.outOffset[j]] <<= 8;
10454 out += info.outJump;
10457 else if (info.inFormat == RTAUDIO_SINT16) {
10458 // Channel compensation and/or (de)interleaving only.
10459 Int16 *in = (Int16 *)inBuffer;
10460 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10461 for (j=0; j<info.channels; j++) {
10462 out[info.outOffset[j]] = in[info.inOffset[j]];
10465 out += info.outJump;
10468 else if (info.inFormat == RTAUDIO_SINT24) {
10469 Int24 *in = (Int24 *)inBuffer;
10470 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10471 for (j=0; j<info.channels; j++) {
10472 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10475 out += info.outJump;
10478 else if (info.inFormat == RTAUDIO_SINT32) {
10479 Int32 *in = (Int32 *)inBuffer;
10480 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10481 for (j=0; j<info.channels; j++) {
10482 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10485 out += info.outJump;
10488 else if (info.inFormat == RTAUDIO_FLOAT32) {
10489 Float32 *in = (Float32 *)inBuffer;
10490 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10491 for (j=0; j<info.channels; j++) {
10492 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10495 out += info.outJump;
10498 else if (info.inFormat == RTAUDIO_FLOAT64) {
10499 Float64 *in = (Float64 *)inBuffer;
10500 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10501 for (j=0; j<info.channels; j++) {
10502 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10505 out += info.outJump;
10509 else if (info.outFormat == RTAUDIO_SINT8) {
10510 signed char *out = (signed char *)outBuffer;
10511 if (info.inFormat == RTAUDIO_SINT8) {
10512 // Channel compensation and/or (de)interleaving only.
10513 signed char *in = (signed char *)inBuffer;
10514 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10515 for (j=0; j<info.channels; j++) {
10516 out[info.outOffset[j]] = in[info.inOffset[j]];
10519 out += info.outJump;
10522 if (info.inFormat == RTAUDIO_SINT16) {
10523 Int16 *in = (Int16 *)inBuffer;
10524 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10525 for (j=0; j<info.channels; j++) {
10526 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10529 out += info.outJump;
10532 else if (info.inFormat == RTAUDIO_SINT24) {
10533 Int24 *in = (Int24 *)inBuffer;
10534 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10535 for (j=0; j<info.channels; j++) {
10536 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10539 out += info.outJump;
10542 else if (info.inFormat == RTAUDIO_SINT32) {
10543 Int32 *in = (Int32 *)inBuffer;
10544 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10545 for (j=0; j<info.channels; j++) {
10546 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10549 out += info.outJump;
10552 else if (info.inFormat == RTAUDIO_FLOAT32) {
10553 Float32 *in = (Float32 *)inBuffer;
10554 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10555 for (j=0; j<info.channels; j++) {
10556 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10559 out += info.outJump;
10562 else if (info.inFormat == RTAUDIO_FLOAT64) {
10563 Float64 *in = (Float64 *)inBuffer;
10564 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10565 for (j=0; j<info.channels; j++) {
10566 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10569 out += info.outJump;
10575 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10576 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10577 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10579 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10585 if ( format == RTAUDIO_SINT16 ) {
10586 for ( unsigned int i=0; i<samples; i++ ) {
10587 // Swap 1st and 2nd bytes.
10592 // Increment 2 bytes.
10596 else if ( format == RTAUDIO_SINT32 ||
10597 format == RTAUDIO_FLOAT32 ) {
10598 for ( unsigned int i=0; i<samples; i++ ) {
10599 // Swap 1st and 4th bytes.
10604 // Swap 2nd and 3rd bytes.
10610 // Increment 3 more bytes.
10614 else if ( format == RTAUDIO_SINT24 ) {
10615 for ( unsigned int i=0; i<samples; i++ ) {
10616 // Swap 1st and 3rd bytes.
10621 // Increment 2 more bytes.
10625 else if ( format == RTAUDIO_FLOAT64 ) {
10626 for ( unsigned int i=0; i<samples; i++ ) {
10627 // Swap 1st and 8th bytes
10632 // Swap 2nd and 7th bytes
10638 // Swap 3rd and 6th bytes
10644 // Swap 4th and 5th bytes
10650 // Increment 5 more bytes.
10656 // Indentation settings for Vim and Emacs
10658 // Local Variables:
10659 // c-basic-offset: 2
10660 // indent-tabs-mode: nil
10663 // vim: et sts=2 sw=2