1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
// Static variable definitions.
// Table of candidate sample rates probed when a device reports a
// continuous rate range rather than discrete rates.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: on Windows builds the generic MUTEX_*
// macros map onto the Win32 critical-section API.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a null-terminated C string to a std::string.
// A NULL pointer is mapped to the empty string: passing NULL to the
// std::string(const char*) constructor is undefined behavior.
static std::string convertCharPointerToStdString(const char *text)
{
  if ( text == NULL ) return std::string();
  return std::string(text);
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
// On POSIX platforms the MUTEX_* macros map onto pthread mutexes.
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// No compiled API support: no-op placeholders so code still compiles.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum.
// Each row pairs { machine-readable name, human-readable display name }.
// NOTE(review): rows for "jack", "alsa" and "asio" are not visible in
// this excerpt — confirm the table still matches RtAudio::Api exactly.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
// Number of rows in the name table above.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
// The order here will control the order of RtAudio's API search in
// the constructor.
// extern "C" linkage so the symbols are reachable without C++ name
// mangling (used by the C wrapper API).
extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
#if defined(__LINUX_PULSE__)
  RtAudio::LINUX_PULSE,
#if defined(__LINUX_ALSA__)
#if defined(__LINUX_OSS__)
#if defined(__WINDOWS_ASIO__)
  RtAudio::WINDOWS_ASIO,
#if defined(__WINDOWS_WASAPI__)
  RtAudio::WINDOWS_WASAPI,
#if defined(__WINDOWS_DS__)
#if defined(__MACOSX_CORE__)
  RtAudio::MACOSX_CORE,
#if defined(__RTAUDIO_DUMMY__)
  RtAudio::RTAUDIO_DUMMY,
  RtAudio::UNSPECIFIED,
// Subtract one for the UNSPECIFIED sentinel entry at the end.
extern "C" const unsigned int rtaudio_num_compiled_apis =
  sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
// This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
// If the build breaks here, check that they match.
// Pre-C++11 static_assert emulation: only StaticAssert<true> has a
// public constructor, so instantiating StaticAssert<false> fails to compile.
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
163 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
166 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
169 std::string RtAudio :: getApiName( RtAudio::Api api )
171 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return rtaudio_api_names[api][0];
176 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return rtaudio_api_names[api][1];
183 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
186 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
187 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
188 return rtaudio_compiled_apis[i];
189 return RtAudio::UNSPECIFIED;
// Instantiate the RtApi subclass matching the requested API.  Only
// APIs enabled at compile time are candidates; rtapi_ is assigned only
// when the requested API was compiled in.
void RtAudio :: openRtApi( RtAudio::Api api )
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
// Constructor: open the requested API, or, when UNSPECIFIED, search
// the compiled APIs in priority order for one with at least one device.
RtAudio :: RtAudio( RtAudio::Api api )
  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    if ( rtapi_ ) return;
    // No compiled support for specified API value.  Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;
  if ( rtapi_ ) return;
  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor — presumably releases the rtapi_ instance created by the
// constructor; the body is not visible in this excerpt (TODO confirm).
RtAudio :: ~RtAudio()
275 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
276 RtAudio::StreamParameters *inputParameters,
277 RtAudioFormat format, unsigned int sampleRate,
278 unsigned int *bufferFrames,
279 RtAudioCallback callback, void *userData,
280 RtAudio::StreamOptions *options,
281 RtAudioErrorCallback errorCallback )
283 return rtapi_->openStream( outputParameters, inputParameters, format,
284 sampleRate, bufferFrames, callback,
285 userData, options, errorCallback );
288 // *************************************************** //
290 // Public RtApi definitions (see end of file for
291 // private or protected utility functions).
293 // *************************************************** //
  // RtApi constructor body: put the stream structure into a known
  // closed/uninitialized state and create the stream mutex.
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
  // RtApi destructor body: tear down the stream mutex.
  MUTEX_DESTROY( &stream_.mutex );
// Shared stream-opening logic for all API subclasses: validate the
// parameter structures, format and device ids, then call the
// subclass's probeDeviceOpen() for the output and/or input side and
// record the user callback information.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
  // Refuse to open a second stream on this object.
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
  // Clear stream information potentially left from a previously open stream.
  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
  // formatBytes() returns 0 for an unrecognized sample format.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
  unsigned int iChannels = 0;
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
  // Probe and open the output side first.
  if ( oChannels > 0 ) {
    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
  if ( iChannels > 0 ) {
    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo the successful output open before reporting the failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
  // Record the user callback info for use by the audio thread.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;
  // Report the actual buffer count back to the caller.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
405 unsigned int RtApi :: getDefaultInputDevice( void )
407 // Should be implemented in subclasses if possible.
411 unsigned int RtApi :: getDefaultOutputDevice( void )
413 // Should be implemented in subclasses if possible.
417 void RtApi :: closeStream( void )
419 // MUST be implemented in subclasses!
// Base-class device probe/open placeholder; concrete API subclasses
// must override this to actually configure and open the device.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
  // MUST be implemented in subclasses!
// Advance the stream time by one buffer's duration (bufferSize /
// sampleRate seconds).
void RtApi :: tickStreamTime( void )
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.
  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
#if defined( HAVE_GETTIMEOFDAY )
  // Record the wall-clock time of this tick so getStreamTime() can
  // interpolate between ticks.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
// Total stream latency in frames: the output latency, plus the input
// latency for duplex streams.
long RtApi :: getStreamLatency( void )
  long totalLatency = 0;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
    totalLatency = stream_.latency[0];   // latency[0] = output side
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
    totalLatency += stream_.latency[1];  // latency[1] = input side
// Current stream time in seconds.  When gettimeofday() is available,
// the value is refined by adding the wall-clock time elapsed since the
// last tickStreamTime() call.
double RtApi :: getStreamTime( void )
#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  // No interpolation before the stream runs or before the first tick.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;
  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
  return stream_.streamTime;
// Set the current stream time (in seconds).
void RtApi :: setStreamTime( double time )
  stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  // Reset the tick timestamp so interpolation restarts from now.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
// Sample rate of the currently open stream, in Hz.
unsigned int RtApi :: getStreamSampleRate( void )
  return stream_.sampleRate;
500 // *************************************************** //
502 // OS/API-specific methods.
504 // *************************************************** //
506 #if defined(__MACOSX_CORE__)
508 // The OS X CoreAudio API is designed to use a separate callback
509 // procedure for each of its audio devices. A single RtAudio duplex
510 // stream using two different devices is supported here, though it
511 // cannot be guaranteed to always behave correctly because we cannot
512 // synchronize these two callbacks.
514 // A property listener is installed for over/underrun information.
515 // However, no functionality is currently provided to allow property
516 // listeners to trigger user handlers because it is unclear what could
517 // be done if a critical stream parameter (buffer size, sample rate,
518 // device disconnect) notification arrived. The listeners entail
519 // quite a bit of extra code and most likely, a user program wouldn't
520 // be prepared for the result anyway. However, we do provide a flag
521 // to the client callback function to inform of an over/underrun.
// A structure to hold various information related to the CoreAudio API
// implementation.  Index 0 of each pair refers to the output device,
// index 1 to the input device (per the id[2] comment below).
  AudioDeviceID id[2];    // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  // Constructor initializer: one stream per direction, zeroed device
  // ids and no xruns recorded.
  :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// RtApiCore constructor: attach CoreAudio property handling to the
// process run loop (required on OS X 10.6+).
RtApiCore:: RtApiCore()
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // Passing a NULL run loop selects the process's default behavior.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
// RtApiCore destructor: ensure any open stream is closed first.
RtApiCore :: ~RtApiCore()
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
// Number of CoreAudio devices, derived from the size of the system's
// device-id property.  Errors are reported as warnings.
unsigned int RtApiCore :: getDeviceCount( void )
  // Find out how many audio devices there are, if any.
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
  // Each device contributes one AudioDeviceID to the property data.
  return dataSize / sizeof( AudioDeviceID );
// Map CoreAudio's default input device to an RtAudio device index by
// locating its AudioDeviceID in the full device list.
unsigned int RtApiCore :: getDefaultInputDevice( void )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;  // nothing to choose between
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
  // Fetch the full device list and search it for the default's id.
  // NOTE(review): deviceList is a variable-length array — a GCC/Clang
  // extension, not standard C++.
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;
  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Map CoreAudio's default output device to an RtAudio device index by
// locating its AudioDeviceID in the full device list.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;  // nothing to choose between
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
  // Fetch the full device list and search it for the default's id.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;
  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Probe a CoreAudio device: name, channel counts, supported sample
// rates, native format and default-device flags.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
  // Translate the RtAudio device index into an AudioDeviceID.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
  AudioDeviceID id = deviceList[ device ];
  // Get the device name.
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // 3 bytes per UTF-16 unit is a conservative upper bound for the
  // converted C-string length.
  // NOTE(review): mname is malloc'd and no free() is visible in this
  // excerpt — check for a memory leak.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // NOTE(review): name is also malloc'd with no visible free().
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)name, strlen(name) );
  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Get output channel information.
  // Output channels = sum of channels over all output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;
  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      // Discrete rate: record it directly.
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );
      // Prefer the highest rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;
      // Continuous range: tighten to the most conservative bounds.
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
  if ( haveValueRange ) {
    // Add every standard rate that falls inside the reported range.
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );
        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;
  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: recover the RtApiCore object from the
// client-data pointer and forward the buffers to callbackEvent().
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiCore *object = (RtApiCore *) info->object;
  // A false return from callbackEvent() is reported to CoreAudio as an
  // unspecified hardware error.
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
    return kAudioHardwareUnspecifiedError;
  return kAudioHardwareNoError;
// Property listener: record processor-overload (xrun) notifications in
// the CoreHandle (xrun[1] = input side, xrun[0] = output side).
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
  CoreHandle *handle = (CoreHandle *) handlePointer;
  for ( UInt32 i=0; i<nAddresses; i++ ) {
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
        handle->xrun[1] = true;
      handle->xrun[0] = true;
  return kAudioHardwareNoError;
// Property listener: read the device's current nominal sample rate
// into the Float64 pointed to by the client-data pointer.
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
// Probe and configure one direction (OUTPUT or INPUT) of a stream on the
// given CoreAudio device.  The routine: validates the device index, picks
// the CoreAudio stream(s) that supply the requested channels, clamps and
// sets the hardware buffer size, changes the device nominal sample rate if
// needed (waiting on a property listener for the change to take effect),
// sets the virtual and, if necessary, physical stream formats, allocates
// the user/device buffers and the per-stream CoreHandle, registers the
// audio IOProc and the over/underload (xrun) listener, and fills in the
// stream_ bookkeeping fields.  On failure it sets errorText_ and tears
// down any partially-created state (see the cleanup code at the bottom).
//
// device       index into the RtAudio device list (0 .. nDevices-1)
// mode         OUTPUT or INPUT; a second call with the opposite mode on
//              the same device promotes stream_.mode to DUPLEX
// channels     number of channels requested by the caller
// firstChannel first device channel to use (channel offset)
// sampleRate   requested sample rate in Hz
// format       user-side sample format (RTAUDIO_*)
// bufferSize   in/out: requested frames per buffer; clamped to the device
//              range and written back with the value actually set
// options      optional flags (RTAUDIO_HOG_DEVICE, RTAUDIO_MINIMIZE_LATENCY,
//              RTAUDIO_NONINTERLEAVED); may be NULL
940 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
941 unsigned int firstChannel, unsigned int sampleRate,
942 RtAudioFormat format, unsigned int *bufferSize,
943 RtAudio::StreamOptions *options )
946 unsigned int nDevices = getDeviceCount();
947 if ( nDevices == 0 ) {
948 // This should not happen because a check is made before this function is called.
949 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
953 if ( device >= nDevices ) {
954 // This should not happen because a check is made before this function is called.
955 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): variable-length array — a GCC/Clang extension, not standard C++.
959 AudioDeviceID deviceList[ nDevices ];
960 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
961 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
962 kAudioObjectPropertyScopeGlobal,
963 kAudioObjectPropertyElementMaster };
964 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
965 0, NULL, &dataSize, (void *) &deviceList );
966 if ( result != noErr ) {
967 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
971 AudioDeviceID id = deviceList[ device ];
973 // Setup for stream mode.
974 bool isInput = false;
975 if ( mode == INPUT ) {
977 property.mScope = kAudioDevicePropertyScopeInput;
980 property.mScope = kAudioDevicePropertyScopeOutput;
982 // Get the stream "configuration".
983 AudioBufferList *bufferList = nil;
985 property.mSelector = kAudioDevicePropertyStreamConfiguration;
986 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
987 if ( result != noErr || dataSize == 0 ) {
988 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
989 errorText_ = errorStream_.str();
993 // Allocate the AudioBufferList.
994 bufferList = (AudioBufferList *) malloc( dataSize );
995 if ( bufferList == NULL ) {
996 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1000 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1001 if (result != noErr || dataSize == 0) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Search for one or more streams that contain the desired number of
1009 // channels. CoreAudio devices can have an arbitrary number of
1010 // streams and each stream can have an arbitrary number of channels.
1011 // For each stream, a single buffer of interleaved samples is
1012 // provided. RtAudio prefers the use of one stream of interleaved
1013 // data or multiple consecutive single-channel streams. However, we
1014 // now support multiple consecutive multi-channel streams of
1015 // interleaved data as well.
1016 UInt32 iStream, offsetCounter = firstChannel;
1017 UInt32 nStreams = bufferList->mNumberBuffers;
1018 bool monoMode = false;
1019 bool foundStream = false;
1021 // First check that the device supports the requested number of
1023 UInt32 deviceChannels = 0;
1024 for ( iStream=0; iStream<nStreams; iStream++ )
1025 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1027 if ( deviceChannels < ( channels + firstChannel ) ) {
1029 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1030 errorText_ = errorStream_.str();
1034 // Look for a single stream meeting our needs.
1035 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1036 for ( iStream=0; iStream<nStreams; iStream++ ) {
1037 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1038 if ( streamChannels >= channels + offsetCounter ) {
1039 firstStream = iStream;
1040 channelOffset = offsetCounter;
// Requested channels straddle this stream's end: no single stream will do.
1044 if ( streamChannels > offsetCounter ) break;
1045 offsetCounter -= streamChannels;
1048 // If we didn't find a single stream above, then we should be able
1049 // to meet the channel specification with multiple streams.
1050 if ( foundStream == false ) {
1052 offsetCounter = firstChannel;
// Walk past whole streams until the one containing firstChannel.
1053 for ( iStream=0; iStream<nStreams; iStream++ ) {
1054 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1055 if ( streamChannels > offsetCounter ) break;
1056 offsetCounter -= streamChannels;
1059 firstStream = iStream;
1060 channelOffset = offsetCounter;
1061 Int32 channelCounter = channels + offsetCounter - streamChannels;
1063 if ( streamChannels > 1 ) monoMode = false;
// Consume consecutive streams until the channel request is satisfied.
1064 while ( channelCounter > 0 ) {
1065 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1066 if ( streamChannels > 1 ) monoMode = false;
1067 channelCounter -= streamChannels;
1074 // Determine the buffer size.
1075 AudioValueRange bufferRange;
1076 dataSize = sizeof( AudioValueRange );
1077 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1078 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1080 if ( result != noErr ) {
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1082 errorText_ = errorStream_.str();
// Clamp the requested frame count into the device's supported range.
1086 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1087 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1088 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1090 // Set the buffer size. For multiple streams, I'm assuming we only
1091 // need to make this setting for the master channel.
1092 UInt32 theSize = (UInt32) *bufferSize;
1093 dataSize = sizeof( UInt32 );
1094 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1095 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1097 if ( result != noErr ) {
1098 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1099 errorText_ = errorStream_.str();
1103 // If attempting to setup a duplex stream, the bufferSize parameter
1104 // MUST be the same in both directions!
1105 *bufferSize = theSize;
1106 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1107 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1108 errorText_ = errorStream_.str();
1112 stream_.bufferSize = *bufferSize;
1113 stream_.nBuffers = 1;
1115 // Try to set "hog" mode ... it's not clear to me this is working.
1116 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1118 dataSize = sizeof( hog_pid );
1119 property.mSelector = kAudioDevicePropertyHogMode;
1120 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1121 if ( result != noErr ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1123 errorText_ = errorStream_.str();
// Only take exclusive ownership if another process currently hogs the device.
1127 if ( hog_pid != getpid() ) {
1129 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1130 if ( result != noErr ) {
1131 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1132 errorText_ = errorStream_.str();
1138 // Check and if necessary, change the sample rate for the device.
1139 Float64 nominalRate;
1140 dataSize = sizeof( Float64 );
1141 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1142 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1145 errorText_ = errorStream_.str();
1149 // Only change the sample rate if off by more than 1 Hz.
1150 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1152 // Set a property listener for the sample rate change
1153 Float64 reportedRate = 0.0;
1154 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1155 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1158 errorText_ = errorStream_.str();
1162 nominalRate = (Float64) sampleRate;
1163 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1164 if ( result != noErr ) {
1165 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1167 errorText_ = errorStream_.str();
1171 // Now wait until the reported nominal rate is what we just set.
// Poll in 5 ms steps with a 5 second cap; rateListener updates reportedRate.
1172 UInt32 microCounter = 0;
1173 while ( reportedRate != nominalRate ) {
1174 microCounter += 5000;
1175 if ( microCounter > 5000000 ) break;
1179 // Remove the property listener.
1180 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1182 if ( microCounter > 5000000 ) {
1183 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1184 errorText_ = errorStream_.str();
1189 // Now set the stream format for all streams. Also, check the
1190 // physical format of the device and change that if necessary.
1191 AudioStreamBasicDescription description;
1192 dataSize = sizeof( AudioStreamBasicDescription );
1193 property.mSelector = kAudioStreamPropertyVirtualFormat;
1194 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1195 if ( result != noErr ) {
1196 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1197 errorText_ = errorStream_.str();
1201 // Set the sample rate and data format id. However, only make the
1202 // change if the sample rate is not within 1.0 of the desired
1203 // rate and the format is not linear pcm.
1204 bool updateFormat = false;
1205 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1206 description.mSampleRate = (Float64) sampleRate;
1207 updateFormat = true;
1210 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1211 description.mFormatID = kAudioFormatLinearPCM;
1212 updateFormat = true;
1215 if ( updateFormat ) {
1216 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1217 if ( result != noErr ) {
1218 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1219 errorText_ = errorStream_.str();
1224 // Now check the physical format.
1225 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1226 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1227 if ( result != noErr ) {
1228 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1229 errorText_ = errorStream_.str();
1233 //std::cout << "Current physical stream format:" << std::endl;
1234 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1235 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1236 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1237 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
// Upgrade the physical format if it is not linear PCM of at least 16 bits.
1239 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1240 description.mFormatID = kAudioFormatLinearPCM;
1241 //description.mSampleRate = (Float64) sampleRate;
1242 AudioStreamBasicDescription testDescription = description;
1245 // We'll try higher bit rates first and then work our way down.
// NOTE(review): element type mismatch — the vector is declared
// pair<UInt32, UInt32> but pair<Float32, UInt32> values with fractional
// keys (24.2, 24.4) are pushed below.  If the stored key really is UInt32,
// the three 24-bit variants collapse to the same key; upstream RtAudio
// declares this as pair<Float32, UInt32> — verify against the canonical
// source before changing.
1246 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1247 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1248 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1249 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1252 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1254 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1255 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1256 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1260 bool setPhysicalFormat = false;
1261 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1262 testDescription = description;
1263 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1264 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): bitwise '~' on the flag test below — ~(x & flag) is nonzero
// for virtually every value, so this condition is effectively
// "bits == 24".  Logical '!' (unpacked 24-bit only) looks intended; confirm
// against the canonical RtAudio source before fixing.
1265 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1266 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1268 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1270 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1271 if ( result == noErr ) {
1272 setPhysicalFormat = true;
1273 //std::cout << "Updated physical stream format:" << std::endl;
1274 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1275 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1276 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1277 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1282 if ( !setPhysicalFormat ) {
1283 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1284 errorText_ = errorStream_.str();
1287 } // done setting virtual/physical formats.
1289 // Get the stream / device latency.
1291 dataSize = sizeof( UInt32 );
1292 property.mSelector = kAudioDevicePropertyLatency;
1293 if ( AudioObjectHasProperty( id, &property ) == true ) {
1294 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1295 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1297 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1298 errorText_ = errorStream_.str();
1299 error( RtAudioError::WARNING );
1303 // Byte-swapping: According to AudioHardware.h, the stream data will
1304 // always be presented in native-endian format, so we should never
1305 // need to byte swap.
1306 stream_.doByteSwap[mode] = false;
1308 // From the CoreAudio documentation, PCM data must be supplied as
1310 stream_.userFormat = format;
1311 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1313 if ( streamCount == 1 )
1314 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1315 else // multiple streams
1316 stream_.nDeviceChannels[mode] = channels;
1317 stream_.nUserChannels[mode] = channels;
1318 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1319 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1320 else stream_.userInterleaved = true;
1321 stream_.deviceInterleaved[mode] = true;
1322 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1324 // Set flags for buffer conversion.
1325 stream_.doConvertBuffer[mode] = false;
1326 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1327 stream_.doConvertBuffer[mode] = true;
1328 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1330 if ( streamCount == 1 ) {
1331 if ( stream_.nUserChannels[mode] > 1 &&
1332 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1333 stream_.doConvertBuffer[mode] = true;
1335 else if ( monoMode && stream_.userInterleaved )
1336 stream_.doConvertBuffer[mode] = true;
1338 // Allocate our CoreHandle structure for the stream.
1339 CoreHandle *handle = 0;
1340 if ( stream_.apiHandle == 0 ) {
1342 handle = new CoreHandle;
1344 catch ( std::bad_alloc& ) {
1345 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1349 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1350 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1353 stream_.apiHandle = (void *) handle;
// Second (duplex) call: reuse the handle created for the first direction.
1356 handle = (CoreHandle *) stream_.apiHandle;
1357 handle->iStream[mode] = firstStream;
1358 handle->nStreams[mode] = streamCount;
1359 handle->id[mode] = id;
1361 // Allocate necessary internal buffers.
1362 unsigned long bufferBytes;
1363 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1364 //  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): the memset below runs before the NULL check — if malloc
// fails this dereferences NULL.  Check first (or restore the calloc above).
1365 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1366 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1367 if ( stream_.userBuffer[mode] == NULL ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1372 // If possible, we will make use of the CoreAudio stream buffers as
1373 // "device buffers".  However, we can't do this if using multiple
1375 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1377 bool makeBuffer = true;
1378 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1379 if ( mode == INPUT ) {
// Reuse the output-direction device buffer when it is already big enough.
1380 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1381 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1382 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1387 bufferBytes *= *bufferSize;
1388 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1389 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1390 if ( stream_.deviceBuffer == NULL ) {
1391 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1397 stream_.sampleRate = sampleRate;
1398 stream_.device[mode] = device;
1399 stream_.state = STREAM_STOPPED;
1400 stream_.callbackInfo.object = (void *) this;
1402 // Setup the buffer conversion information structure.
1403 if ( stream_.doConvertBuffer[mode] ) {
1404 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1405 else setConvertInfo( mode, channelOffset );
1408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1409 // Only one callback procedure per device.
1410 stream_.mode = DUPLEX;
1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1413 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 // deprecated in favor of AudioDeviceCreateIOProcID()
1416 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 if ( result != noErr ) {
1419 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1420 errorText_ = errorStream_.str();
1423 if ( stream_.mode == OUTPUT && mode == INPUT )
1424 stream_.mode = DUPLEX;
1426 stream_.mode = mode;
1429 // Setup the device property listener for over/underload.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error cleanup: release the condition variable, handle, and any buffers
// allocated above, then mark the stream closed.
1438 pthread_cond_destroy( &handle->condition );
1440 stream_.apiHandle = 0;
1443 for ( int i=0; i<2; i++ ) {
1444 if ( stream_.userBuffer[i] ) {
1445 free( stream_.userBuffer[i] );
1446 stream_.userBuffer[i] = 0;
1450 if ( stream_.deviceBuffer ) {
1451 free( stream_.deviceBuffer );
1452 stream_.deviceBuffer = 0;
1455 stream_.state = STREAM_CLOSED;
// Close an open stream: remove the xrun property listeners, stop the
// device(s) if the stream is still running, destroy/remove the IOProc for
// each direction, free the user and device buffers, destroy the pthread
// condition variable in the CoreHandle, and reset stream_ to
// UNINITIALIZED / STREAM_CLOSED.  Issues a WARNING if no stream is open.
1459 void RtApiCore :: closeStream( void )
1461 if ( stream_.state == STREAM_CLOSED ) {
1462 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1463 error( RtAudioError::WARNING );
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output direction (handle->id[0]).
1468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1471 kAudioObjectPropertyScopeGlobal,
1472 kAudioObjectPropertyElementMaster };
1474 property.mSelector = kAudioDeviceProcessorOverload;
1475 property.mScope = kAudioObjectPropertyScopeGlobal;
1476 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1477 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1478 error( RtAudioError::WARNING );
1481 if ( stream_.state == STREAM_RUNNING )
1482 AudioDeviceStop( handle->id[0], callbackHandler );
1483 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1484 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 // deprecated in favor of AudioDeviceDestroyIOProcID()
1487 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input direction, but only if it uses a different device
// (a same-device duplex stream shares one IOProc and listener).
1491 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1493 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1494 kAudioObjectPropertyScopeGlobal,
1495 kAudioObjectPropertyElementMaster };
1497 property.mSelector = kAudioDeviceProcessorOverload;
1498 property.mScope = kAudioObjectPropertyScopeGlobal;
1499 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1500 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1501 error( RtAudioError::WARNING );
1504 if ( stream_.state == STREAM_RUNNING )
1505 AudioDeviceStop( handle->id[1], callbackHandler );
1506 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1507 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1509 // deprecated in favor of AudioDeviceDestroyIOProcID()
1510 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers and the shared device buffer.
1514 for ( int i=0; i<2; i++ ) {
1515 if ( stream_.userBuffer[i] ) {
1516 free( stream_.userBuffer[i] );
1517 stream_.userBuffer[i] = 0;
1521 if ( stream_.deviceBuffer ) {
1522 free( stream_.deviceBuffer );
1523 stream_.deviceBuffer = 0;
1526 // Destroy pthread condition variable.
1527 pthread_cond_destroy( &handle->condition );
1529 stream_.apiHandle = 0;
1531 stream_.mode = UNINITIALIZED;
1532 stream_.state = STREAM_CLOSED;
// Start the stream: record the start timestamp (when gettimeofday is
// available), call AudioDeviceStart() for the output device and, for an
// input or split-device duplex stream, for the input device as well.
// Resets the drain bookkeeping and marks the stream RUNNING; reports a
// SYSTEM_ERROR via error() if either start call failed.  Issues only a
// WARNING if the stream is already running.
1535 void RtApiCore :: startStream( void )
1538 if ( stream_.state == STREAM_RUNNING ) {
1539 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1540 error( RtAudioError::WARNING );
1544 #if defined( HAVE_GETTIMEOFDAY )
1545 gettimeofday( &stream_.lastTickTimestamp, NULL );
1548 OSStatus result = noErr;
1549 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1550 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1552 result = AudioDeviceStart( handle->id[0], callbackHandler );
1553 if ( result != noErr ) {
1554 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1555 errorText_ = errorStream_.str();
// Same-device duplex streams share one IOProc, so the input device is
// started separately only when it differs from the output device.
1560 if ( stream_.mode == INPUT ||
1561 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1563 result = AudioDeviceStart( handle->id[1], callbackHandler );
1564 if ( result != noErr ) {
1565 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1566 errorText_ = errorStream_.str();
1571 handle->drainCounter = 0;
1572 handle->internalDrain = false;
1573 stream_.state = STREAM_RUNNING;
1576 if ( result == noErr ) return;
1577 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream.  For an output-capable stream, first lets the output
// drain: sets drainCounter and blocks on the CoreHandle condition variable
// until callbackEvent() signals that zeros have been flushed through the
// device, then calls AudioDeviceStop() for the output device and, when the
// input direction uses a different device, for the input device too.
// Marks the stream STOPPED; reports a SYSTEM_ERROR if a stop call failed.
// Issues only a WARNING if the stream is already stopped.
1580 void RtApiCore :: stopStream( void )
1583 if ( stream_.state == STREAM_STOPPED ) {
1584 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1585 error( RtAudioError::WARNING );
1589 OSStatus result = noErr;
1590 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1591 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is in progress yet: request one and
// wait for the audio callback to signal completion.
1593 if ( handle->drainCounter == 0 ) {
1594 handle->drainCounter = 2;
1595 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1598 result = AudioDeviceStop( handle->id[0], callbackHandler );
1599 if ( result != noErr ) {
1600 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1601 errorText_ = errorStream_.str();
1606 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1608 result = AudioDeviceStop( handle->id[1], callbackHandler );
1609 if ( result != noErr ) {
1610 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1611 errorText_ = errorStream_.str();
1616 stream_.state = STREAM_STOPPED;
1619 if ( result == noErr ) return;
1620 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream without draining pending output: setting
// drainCounter = 2 makes callbackEvent() write zeros to the output from
// the next callback onward instead of invoking the user callback.
// Issues only a WARNING if the stream is already stopped.
1623 void RtApiCore :: abortStream( void )
1626 if ( stream_.state == STREAM_STOPPED ) {
1627 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1628 error( RtAudioError::WARNING );
1632 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1633 handle->drainCounter = 2;
1638 // This function will be called by a spawned thread when the user
1639 // callback function signals that the stream should be stopped or
1640 // aborted.  It is better to handle it this way because the
1641 // callbackEvent() function probably should return before the AudioDeviceStop()
1642 // function is called.
// Thread entry point: unpacks the CallbackInfo passed by pthread_create()
// in callbackEvent(), stops the stream, and terminates the helper thread.
1643 static void *coreStopStream( void *ptr )
1645 CallbackInfo *info = (CallbackInfo *) ptr;
1646 RtApiCore *object = (RtApiCore *) info->object;
1648 object->stopStream();
1649 pthread_exit( NULL );
// Per-buffer audio I/O handler, invoked (via callbackHandler) from the
// CoreAudio IOProc for the device identified by deviceId.  Responsibilities
// visible here: finish a pending drain (spawning coreStopStream or
// signalling stopStream()), invoke the user callback to produce fresh
// output data, copy/convert the user buffer into the CoreAudio output
// AudioBufferList (handling single-stream, mono/non-interleaved, and
// multi-stream interleaved layouts), and mirror the same layouts in the
// input direction, finally advancing the stream time.
//
// deviceId      CoreAudio device this IOProc invocation belongs to
// inBufferList  device input buffers (read for INPUT/DUPLEX)
// outBufferList device output buffers (written for OUTPUT/DUPLEX)
1652 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1653 const AudioBufferList *inBufferList,
1654 const AudioBufferList *outBufferList )
1656 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1657 if ( stream_.state == STREAM_CLOSED ) {
1658 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1659 error( RtAudioError::WARNING );
1663 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1664 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1666 // Check if we were draining the stream and signal is finished.
// drainCounter is incremented each callback while draining (see below);
// past 3 the zero-filled buffers have flushed through and we can stop.
1667 if ( handle->drainCounter > 3 ) {
1668 ThreadHandle threadId;
1670 stream_.state = STREAM_STOPPING;
1671 if ( handle->internalDrain == true )
1672 pthread_create( &threadId, NULL, coreStopStream, info );
1673 else // external call to stopStream()
1674 pthread_cond_signal( &handle->condition );
1678 AudioDeviceID outputDevice = handle->id[0];
1680 // Invoke user callback to get fresh output data UNLESS we are
1681 // draining stream or duplex mode AND the input/output devices are
1682 // different AND this function is called for the input device.
1683 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1684 RtAudioCallback callback = (RtAudioCallback) info->callback;
1685 double streamTime = getStreamTime();
1686 RtAudioStreamStatus status = 0;
// Report (and clear) any xrun flags recorded by xrunListener.
1687 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1688 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1689 handle->xrun[0] = false;
1691 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1692 status |= RTAUDIO_INPUT_OVERFLOW;
1693 handle->xrun[1] = false;
1696 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1697 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort (no drain), 1 = stop after draining output.
1698 if ( cbReturnValue == 2 ) {
1699 stream_.state = STREAM_STOPPING;
1700 handle->drainCounter = 2;
1704 else if ( cbReturnValue == 1 ) {
1705 handle->drainCounter = 1;
1706 handle->internalDrain = true;
// ---- Output direction ------------------------------------------------
1710 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1712 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1714 if ( handle->nStreams[0] == 1 ) {
1715 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1717 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1719 else { // fill multiple streams with zeros
1720 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1721 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1723 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1727 else if ( handle->nStreams[0] == 1 ) {
1728 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1729 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1730 stream_.userBuffer[0], stream_.convertInfo[0] );
1732 else { // copy from user buffer
1733 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1734 stream_.userBuffer[0],
1735 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1738 else { // fill multiple streams
1739 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1740 if ( stream_.doConvertBuffer[0] ) {
1741 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1742 inBuffer = (Float32 *) stream_.deviceBuffer;
1745 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1746 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
// One single-channel CoreAudio stream per user channel.
1747 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1748 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1749 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1752 else { // fill multiple multi-channel streams with interleaved data
1753 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1756 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1757 UInt32 inChannels = stream_.nUserChannels[0];
1758 if ( stream_.doConvertBuffer[0] ) {
1759 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1760 inChannels = stream_.nDeviceChannels[0];
// inOffset is the distance between consecutive channels of one frame
// in the source: 1 when interleaved, bufferSize when planar.
1763 if ( inInterleaved ) inOffset = 1;
1764 else inOffset = stream_.bufferSize;
1766 channelsLeft = inChannels;
1767 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1769 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1770 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1773 // Account for possible channel offset in first stream
1774 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1775 streamChannels -= stream_.channelOffset[0];
1776 outJump = stream_.channelOffset[0];
1780 // Account for possible unfilled channels at end of the last stream
1781 if ( streamChannels > channelsLeft ) {
1782 outJump = streamChannels - channelsLeft;
1783 streamChannels = channelsLeft;
1786 // Determine input buffer offsets and skips
1787 if ( inInterleaved ) {
1788 inJump = inChannels;
1789 in += inChannels - channelsLeft;
1793 in += (inChannels - channelsLeft) * inOffset;
// Copy frame-by-frame, channel-by-channel into this stream's buffer.
1796 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1797 for ( unsigned int j=0; j<streamChannels; j++ ) {
1798 *out++ = in[j*inOffset];
1803 channelsLeft -= streamChannels;
1809 // Don't bother draining input
// While draining, count callbacks instead of reading input; the > 3
// test at the top of this function then ends the drain.
1810 if ( handle->drainCounter ) {
1811 handle->drainCounter++;
// ---- Input direction -------------------------------------------------
1815 AudioDeviceID inputDevice;
1816 inputDevice = handle->id[1];
1817 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1819 if ( handle->nStreams[1] == 1 ) {
1820 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1821 convertBuffer( stream_.userBuffer[1],
1822 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1823 stream_.convertInfo[1] );
1825 else { // copy to user buffer
1826 memcpy( stream_.userBuffer[1],
1827 inBufferList->mBuffers[handle->iStream[1]].mData,
1828 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1831 else { // read from multiple streams
1832 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1833 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1835 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1836 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1837 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1838 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1839 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1842 else { // read from multiple multi-channel streams
// Mirror image of the multi-stream output path above.
1843 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1846 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1847 UInt32 outChannels = stream_.nUserChannels[1];
1848 if ( stream_.doConvertBuffer[1] ) {
1849 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1850 outChannels = stream_.nDeviceChannels[1];
1853 if ( outInterleaved ) outOffset = 1;
1854 else outOffset = stream_.bufferSize;
1856 channelsLeft = outChannels;
1857 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1859 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1860 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1863 // Account for possible channel offset in first stream
1864 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1865 streamChannels -= stream_.channelOffset[1];
1866 inJump = stream_.channelOffset[1];
1870 // Account for possible unread channels at end of the last stream
1871 if ( streamChannels > channelsLeft ) {
1872 inJump = streamChannels - channelsLeft;
1873 streamChannels = channelsLeft;
1876 // Determine output buffer offsets and skips
1877 if ( outInterleaved ) {
1878 outJump = outChannels;
1879 out += outChannels - channelsLeft;
1883 out += (outChannels - channelsLeft) * outOffset;
1886 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1887 for ( unsigned int j=0; j<streamChannels; j++ ) {
1888 out[j*outOffset] = *in++;
1893 channelsLeft -= streamChannels;
1897 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1898 convertBuffer( stream_.userBuffer[1],
1899 stream_.deviceBuffer,
1900 stream_.convertInfo[1] );
1906 //MUTEX_UNLOCK( &stream_.mutex );
1908 RtApi::tickStreamTime();
// Translate a CoreAudio OSStatus error code into a human-readable string
// (the name of the matching kAudio... constant). Unknown codes fall
// through to a generic message.
// NOTE(review): the opening brace, switch statement, default label and
// closing braces of this function are elided from this fragment.
1912 const char* RtApiCore :: getErrorCode( OSStatus code )
1916 case kAudioHardwareNotRunningError:
1917 return "kAudioHardwareNotRunningError";
1919 case kAudioHardwareUnspecifiedError:
1920 return "kAudioHardwareUnspecifiedError";
1922 case kAudioHardwareUnknownPropertyError:
1923 return "kAudioHardwareUnknownPropertyError";
1925 case kAudioHardwareBadPropertySizeError:
1926 return "kAudioHardwareBadPropertySizeError";
1928 case kAudioHardwareIllegalOperationError:
1929 return "kAudioHardwareIllegalOperationError";
1931 case kAudioHardwareBadObjectError:
1932 return "kAudioHardwareBadObjectError";
1934 case kAudioHardwareBadDeviceError:
1935 return "kAudioHardwareBadDeviceError";
1937 case kAudioHardwareBadStreamError:
1938 return "kAudioHardwareBadStreamError";
1940 case kAudioHardwareUnsupportedOperationError:
1941 return "kAudioHardwareUnsupportedOperationError";
1943 case kAudioDeviceUnsupportedFormatError:
1944 return "kAudioDeviceUnsupportedFormatError";
1946 case kAudioDevicePermissionsError:
1947 return "kAudioDevicePermissionsError";
// Fallback for any code not handled above (the default case).
1950 return "CoreAudio unknown error";
1954 //******************** End of __MACOSX_CORE__ *********************//
1957 #if defined(__UNIX_JACK__)
1959 // JACK is a low-latency audio server, originally written for the
1960 // GNU/Linux operating system and now also ported to OS-X. It can
1961 // connect a number of different applications to an audio device, as
1962 // well as allowing them to share audio between themselves.
1964 // When using JACK with RtAudio, "devices" refer to JACK clients that
1965 // have ports connected to the server. The JACK server is typically
1966 // started in a terminal as follows:
1968 // jackd -d alsa -d hw:0
1970 // or through an interface program such as qjackctl. Many of the
1971 // parameters normally set for a stream are fixed by the JACK server
1972 // and can be specified when the JACK server is started. In
1975 // jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1977 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1978 // frames, and number of buffers = 4. Once the server is running, it
1979 // is not possible to override these values. If the values are not
1980 // specified in the command-line, the JACK server uses default values.
1982 // The JACK server does not have to be running when an instance of
1983 // RtApiJack is created, though the function getDeviceCount() will
1984 // report 0 devices found until JACK has been started. When no
1985 // devices are available (i.e., the JACK server is not running), a
1986 // stream cannot be opened.
1988 #include <jack/jack.h>
1992 // A structure to hold various information related to the Jack API
// Per-stream handle stored in stream_.apiHandle. Index convention used
// throughout this file: [0] = playback (output), [1] = capture (input).
// NOTE(review): the struct header, the xrun flag array declaration and
// the closing brace are elided from this fragment.
1995 jack_client_t *client;
1996 jack_port_t **ports[2];
1997 std::string deviceName[2];
1999 pthread_cond_t condition;
2000 int drainCounter; // Tracks callback counts when draining
2001 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default constructor: no client, no registered ports, xrun flags clear.
2004 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2007 #if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error callback: installed via jack_set_error_function() in
// the RtApiJack constructor to suppress JACK's internal error reporting
// in non-debug builds.
2008 static void jackSilentError( const char * ) {};
// Constructor: port auto-connection is enabled by default; in non-debug
// builds, JACK's internal error printing is silenced.
2011 RtApiJack :: RtApiJack()
2012 :shouldAutoconnect_(true) {
2013 // Nothing to do here.
2014 #if !defined(__RTAUDIO_DEBUG__)
2015 // Turn off Jack's internal error reporting.
2016 jack_set_error_function( &jackSilentError );
// Destructor: make sure any open stream is closed before the API object
// goes away.
2020 RtApiJack :: ~RtApiJack()
2022 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by connecting as a temporary client and counting
// the unique client-name prefixes (text before the first ':') among all
// registered audio ports. Returns 0 when no JACK server is reachable.
2025 unsigned int RtApiJack :: getDeviceCount( void )
2027 // See if we can become a jack client.
2028 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2029 jack_status_t *status = NULL;
2030 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2031 if ( client == 0 ) return 0;
2034 std::string port, previousPort;
2035 unsigned int nChannels = 0, nDevices = 0;
2036 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2038 // Parse the port names up to the first colon (:).
2041 port = (char *) ports[ nChannels ];
2042 iColon = port.find(":");
2043 if ( iColon != std::string::npos ) {
2044 port = port.substr( 0, iColon + 1 );
// A new prefix means a new "device" (the nDevices increment is elided
// from this fragment).
2045 if ( port != previousPort ) {
2047 previousPort = port;
2050 } while ( ports[++nChannels] );
// The temporary client is no longer needed once counting is done.
2054 jack_client_close( client );
// Probe one JACK "device" (client-name prefix) and fill in a DeviceInfo:
// name, channel counts, sample rate and native format. Connects as a
// temporary client ("RtApiJackInfo") that is closed before returning.
2058 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2060 RtAudio::DeviceInfo info;
2061 info.probed = false;
2063 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2064 jack_status_t *status = NULL;
2065 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2066 if ( client == 0 ) {
2067 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2068 error( RtAudioError::WARNING );
2073 std::string port, previousPort;
2074 unsigned int nPorts = 0, nDevices = 0;
2075 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2077 // Parse the port names up to the first colon (:).
// Walk all ports; the (device)-th unique prefix becomes the device name.
2080 port = (char *) ports[ nPorts ];
2081 iColon = port.find(":");
2082 if ( iColon != std::string::npos ) {
2083 port = port.substr( 0, iColon );
2084 if ( port != previousPort ) {
2085 if ( nDevices == device ) info.name = port;
2087 previousPort = port;
2090 } while ( ports[++nPorts] );
2094 if ( device >= nDevices ) {
2095 jack_client_close( client );
2096 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2097 error( RtAudioError::INVALID_USE );
2101 // Get the current jack server sample rate.
// JACK fixes the rate server-wide, so only one rate is reported.
2102 info.sampleRates.clear();
2104 info.preferredSampleRate = jack_get_sample_rate( client );
2105 info.sampleRates.push_back( info.preferredSampleRate );
2107 // Count the available ports containing the client name as device
2108 // channels. Jack "input ports" equal RtAudio output channels.
2109 unsigned int nChannels = 0;
2110 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2112 while ( ports[ nChannels ] ) nChannels++;
2114 info.outputChannels = nChannels;
2117 // Jack "output ports" equal RtAudio input channels.
// nChannels is re-counted here (the reset to 0 is elided from this
// fragment — presumably nChannels = 0 sits between these calls).
2119 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2121 while ( ports[ nChannels ] ) nChannels++;
2123 info.inputChannels = nChannels;
2126 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2127 jack_client_close(client);
2128 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2129 error( RtAudioError::WARNING );
2133 // If device opens for both playback and capture, we determine the channels.
2134 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2135 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2137 // Jack always uses 32-bit floats.
2138 info.nativeFormats = RTAUDIO_FLOAT32;
2140 // Jack doesn't provide default devices so we'll use the first available one.
2141 if ( device == 0 && info.outputChannels > 0 )
2142 info.isDefaultOutput = true;
2143 if ( device == 0 && info.inputChannels > 0 )
2144 info.isDefaultInput = true;
2146 jack_client_close(client);
// JACK process callback: forwards each buffer-ready notification to
// RtApiJack::callbackEvent(). Returning non-zero (1) signals failure to
// the JACK server.
2151 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2153 CallbackInfo *info = (CallbackInfo *) infoPointer;
2155 RtApiJack *object = (RtApiJack *) info->object;
2156 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2161 // This function will be called by a spawned thread when the Jack
2162 // server signals that it is shutting down. It is necessary to handle
2163 // it this way because the jackShutdown() function must return before
2164 // the jack_deactivate() function (in closeStream()) will return.
// Thread entry point: closes the stream from a spawned thread so that
// jack_deactivate() (inside closeStream()) can complete — see the
// explanatory comment above.
2165 static void *jackCloseStream( void *ptr )
2167 CallbackInfo *info = (CallbackInfo *) ptr;
2168 RtApiJack *object = (RtApiJack *) info->object;
2170 object->closeStream();
2172 pthread_exit( NULL );
// JACK shutdown callback: if the stream is still running when the server
// goes away, close it from a separate thread and notify the user.
2174 static void jackShutdown( void *infoPointer )
2176 CallbackInfo *info = (CallbackInfo *) infoPointer;
2177 RtApiJack *object = (RtApiJack *) info->object;
2179 // Check current stream state. If stopped, then we'll assume this
2180 // was called as a result of a call to RtApiJack::stopStream (the
2181 // deactivation of a client handle causes this function to be called).
2182 // If not, we'll assume the Jack server is shutting down or some
2183 // other problem occurred and we should close the stream.
2184 if ( object->isStreamRunning() == false ) return;
// Closing must happen on another thread (see jackCloseStream above).
2186 ThreadHandle threadId;
2187 pthread_create( &threadId, NULL, jackCloseStream, info );
2188 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: flag an over/underflow on whichever direction(s)
// of the stream have registered ports; the flags are reported to the
// user callback (and cleared) in RtApiJack::callbackEvent().
2191 static int jackXrun( void *infoPointer )
2193 JackHandle *handle = *((JackHandle **) infoPointer);
2195 if ( handle->ports[0] ) handle->xrun[0] = true;
2196 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open/configure one direction (OUTPUT or INPUT) of a JACK stream:
// become a JACK client (first pass only), locate the requested "device"
// (client-name prefix), validate channel count and sample rate, set up
// format/interleaving conversion flags and buffers, register our ports,
// and install the JACK process/xrun/shutdown callbacks.
// NOTE(review): several lines (braces, early returns, the error label)
// are elided from this fragment; the trailing free() section is
// presumably the failure-cleanup path.
2201 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2202 unsigned int firstChannel, unsigned int sampleRate,
2203 RtAudioFormat format, unsigned int *bufferSize,
2204 RtAudio::StreamOptions *options )
2206 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2208 // Look for jack server and try to become a client (only do once per stream).
2209 jack_client_t *client = 0;
2210 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2211 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2212 jack_status_t *status = NULL;
2213 if ( options && !options->streamName.empty() )
2214 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2216 client = jack_client_open( "RtApiJack", jackoptions, status );
2217 if ( client == 0 ) {
2218 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2219 error( RtAudioError::WARNING );
2224 // The handle must have been created on an earlier pass.
2225 client = handle->client;
// Map the device index to a JACK client-name prefix (same parsing as in
// getDeviceCount/getDeviceInfo above).
2229 std::string port, previousPort, deviceName;
2230 unsigned int nPorts = 0, nDevices = 0;
2231 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2233 // Parse the port names up to the first colon (:).
2236 port = (char *) ports[ nPorts ];
2237 iColon = port.find(":");
2238 if ( iColon != std::string::npos ) {
2239 port = port.substr( 0, iColon );
2240 if ( port != previousPort ) {
2241 if ( nDevices == device ) deviceName = port;
2243 previousPort = port;
2246 } while ( ports[++nPorts] );
2250 if ( device >= nDevices ) {
2251 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// JACK port direction is inverted relative to RtAudio: our OUTPUT
// connects to the device's input ports and vice versa.
2255 unsigned long flag = JackPortIsInput;
2256 if ( mode == INPUT ) flag = JackPortIsOutput;
2258 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2259 // Count the available ports containing the client name as device
2260 // channels. Jack "input ports" equal RtAudio output channels.
2261 unsigned int nChannels = 0;
2262 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2264 while ( ports[ nChannels ] ) nChannels++;
2267 // Compare the jack ports for specified client to the requested number of channels.
2268 if ( nChannels < (channels + firstChannel) ) {
2269 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2270 errorText_ = errorStream_.str();
2275 // Check the jack server sample rate.
// The rate is fixed by the server; we can only accept or reject it.
2276 unsigned int jackRate = jack_get_sample_rate( client );
2277 if ( sampleRate != jackRate ) {
2278 jack_client_close( client );
2279 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2280 errorText_ = errorStream_.str();
2283 stream_.sampleRate = jackRate;
2285 // Get the latency of the JACK port.
2286 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2287 if ( ports[ firstChannel ] ) {
2289 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2290 // the range (usually the min and max are equal)
2291 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2292 // get the latency range
2293 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2294 // be optimistic, use the min!
2295 stream_.latency[mode] = latrange.min;
2296 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2300 // The jack server always uses 32-bit floating-point data.
2301 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2302 stream_.userFormat = format;
2304 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2305 else stream_.userInterleaved = true;
2307 // Jack always uses non-interleaved buffers.
2308 stream_.deviceInterleaved[mode] = false;
2310 // Jack always provides host byte-ordered data.
2311 stream_.doByteSwap[mode] = false;
2313 // Get the buffer size. The buffer size and number of buffers
2314 // (periods) is set when the jack server is started.
2315 stream_.bufferSize = (int) jack_get_buffer_size( client );
2316 *bufferSize = stream_.bufferSize;
2318 stream_.nDeviceChannels[mode] = channels;
2319 stream_.nUserChannels[mode] = channels;
2321 // Set flags for buffer conversion.
// Conversion is needed when the user format differs from FLOAT32 or
// when an interleaved user layout meets JACK's non-interleaved ports.
2322 stream_.doConvertBuffer[mode] = false;
2323 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2324 stream_.doConvertBuffer[mode] = true;
2325 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2326 stream_.nUserChannels[mode] > 1 )
2327 stream_.doConvertBuffer[mode] = true;
2329 // Allocate our JackHandle structure for the stream.
2330 if ( handle == 0 ) {
2332 handle = new JackHandle;
2334 catch ( std::bad_alloc& ) {
2335 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2339 if ( pthread_cond_init(&handle->condition, NULL) ) {
2340 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2343 stream_.apiHandle = (void *) handle;
2344 handle->client = client;
2346 handle->deviceName[mode] = deviceName;
2348 // Allocate necessary internal buffers.
2349 unsigned long bufferBytes;
2350 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2351 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2352 if ( stream_.userBuffer[mode] == NULL ) {
2353 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2357 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing device buffer in duplex mode when it is already
// large enough for both directions.
2359 bool makeBuffer = true;
2360 if ( mode == OUTPUT )
2361 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2362 else { // mode == INPUT
2363 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2364 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2365 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2366 if ( bufferBytes < bytesOut ) makeBuffer = false;
2371 bufferBytes *= *bufferSize;
2372 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2373 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2374 if ( stream_.deviceBuffer == NULL ) {
2375 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2381 // Allocate memory for the Jack ports (channels) identifiers.
2382 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2383 if ( handle->ports[mode] == NULL ) {
2384 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2388 stream_.device[mode] = device;
2389 stream_.channelOffset[mode] = firstChannel;
2390 stream_.state = STREAM_STOPPED;
2391 stream_.callbackInfo.object = (void *) this;
2393 if ( stream_.mode == OUTPUT && mode == INPUT )
2394 // We had already set up the stream for output.
2395 stream_.mode = DUPLEX;
2397 stream_.mode = mode;
// Install the three JACK callbacks for this client.
2398 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2399 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2400 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2403 // Register our ports.
2405 if ( mode == OUTPUT ) {
2406 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2407 snprintf( label, 64, "outport %d", i );
2408 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2409 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2413 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2414 snprintf( label, 64, "inport %d", i );
2415 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2416 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2420 // Setup the buffer conversion information structure. We don't use
2421 // buffers to do channel offsets, so we override that parameter
2423 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2425 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Failure cleanup: release the condition variable, client, port id
// arrays, user buffers and device buffer allocated above.
2431 pthread_cond_destroy( &handle->condition );
2432 jack_client_close( handle->client );
2434 if ( handle->ports[0] ) free( handle->ports[0] );
2435 if ( handle->ports[1] ) free( handle->ports[1] );
2438 stream_.apiHandle = 0;
2441 for ( int i=0; i<2; i++ ) {
2442 if ( stream_.userBuffer[i] ) {
2443 free( stream_.userBuffer[i] );
2444 stream_.userBuffer[i] = 0;
2448 if ( stream_.deviceBuffer ) {
2449 free( stream_.deviceBuffer );
2450 stream_.deviceBuffer = 0;
// Close an open stream: deactivate the JACK client if running, close the
// client, and free the handle, port arrays, user and device buffers.
// Leaves the stream marked UNINITIALIZED/STREAM_CLOSED.
2456 void RtApiJack :: closeStream( void )
2458 if ( stream_.state == STREAM_CLOSED ) {
2459 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2460 error( RtAudioError::WARNING );
2464 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2467 if ( stream_.state == STREAM_RUNNING )
2468 jack_deactivate( handle->client );
2470 jack_client_close( handle->client );
2474 if ( handle->ports[0] ) free( handle->ports[0] );
2475 if ( handle->ports[1] ) free( handle->ports[1] );
2476 pthread_cond_destroy( &handle->condition );
2478 stream_.apiHandle = 0;
// Release the per-direction user buffers and the shared device buffer.
2481 for ( int i=0; i<2; i++ ) {
2482 if ( stream_.userBuffer[i] ) {
2483 free( stream_.userBuffer[i] );
2484 stream_.userBuffer[i] = 0;
2488 if ( stream_.deviceBuffer ) {
2489 free( stream_.deviceBuffer );
2490 stream_.deviceBuffer = 0;
2493 stream_.mode = UNINITIALIZED;
2494 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and, unless autoconnect was
// disabled (RTAUDIO_JACK_DONT_CONNECT), wire our registered ports to the
// device's ports, honoring the channel offset chosen at open time.
2497 void RtApiJack :: startStream( void )
2500 if ( stream_.state == STREAM_RUNNING ) {
2501 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2502 error( RtAudioError::WARNING );
// Reset the stream-time reference for getStreamTime().
2506 #if defined( HAVE_GETTIMEOFDAY )
2507 gettimeofday( &stream_.lastTickTimestamp, NULL );
2510 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2511 int result = jack_activate( handle->client );
2513 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2519 // Get the list of available ports.
2520 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2522 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2523 if ( ports == NULL) {
2524 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2528 // Now make the port connections. Since RtAudio wasn't designed to
2529 // allow the user to select particular channels of a device, we'll
2530 // just open the first "nChannels" ports with offset.
2531 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2533 if ( ports[ stream_.channelOffset[0] + i ] )
2534 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2537 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2544 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2546 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2547 if ( ports == NULL) {
2548 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2552 // Now make the port connections. See note above.
2553 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2555 if ( ports[ stream_.channelOffset[1] + i ] )
2556 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2559 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Fresh start: no drain in progress.
2566 handle->drainCounter = 0;
2567 handle->internalDrain = false;
2568 stream_.state = STREAM_RUNNING;
2571 if ( result == 0 ) return;
2572 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after letting any queued output drain: for playback
// streams, start a two-callback drain and block on the handle's
// condition variable (signaled from callbackEvent) before deactivating
// the JACK client.
2575 void RtApiJack :: stopStream( void )
2578 if ( stream_.state == STREAM_STOPPED ) {
2579 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2580 error( RtAudioError::WARNING );
2584 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2585 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2587 if ( handle->drainCounter == 0 ) {
2588 handle->drainCounter = 2;
2589 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2593 jack_deactivate( handle->client );
2594 stream_.state = STREAM_STOPPED;
// Abort the stream: set drainCounter so the callback writes zeros
// instead of draining queued output (the stopStream() call that follows
// is elided from this fragment).
2597 void RtApiJack :: abortStream( void )
2600 if ( stream_.state == STREAM_STOPPED ) {
2601 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2602 error( RtAudioError::WARNING );
2606 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2607 handle->drainCounter = 2;
2612 // This function will be called by a spawned thread when the user
2613 // callback function signals that the stream should be stopped or
2614 // aborted. It is necessary to handle it this way because the
2615 // callbackEvent() function must return before the jack_deactivate()
2616 // function will return.
// Thread entry point: stops the stream from a spawned thread so that
// callbackEvent() can return before jack_deactivate() runs — see the
// explanatory comment above.
2617 static void *jackStopStream( void *ptr )
2619 CallbackInfo *info = (CallbackInfo *) ptr;
2620 RtApiJack *object = (RtApiJack *) info->object;
2622 object->stopStream();
2623 pthread_exit( NULL );
// Per-buffer JACK processing: invoke the user callback, then move audio
// between the user/device buffers and the JACK port buffers (with format
// or interleaving conversion when doConvertBuffer is set), handle drain
// and stop requests, and advance the stream time.
// Returns SUCCESS to keep the callback installed.
// FIX: the two error messages below said "RtApiCore::callbackEvent()" —
// a copy/paste slip from the CoreAudio section; corrected to RtApiJack
// to match every other message in this section.
2626 bool RtApiJack :: callbackEvent( unsigned long nframes )
2628 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2629 if ( stream_.state == STREAM_CLOSED ) {
2630 errorText_ = "RtApiJack::callbackEvent(): the stream is closed ... this shouldn't happen!";
2631 error( RtAudioError::WARNING );
// The JACK server fixes the buffer size at start; a change mid-stream
// cannot be handled here.
2634 if ( stream_.bufferSize != nframes ) {
2635 errorText_ = "RtApiJack::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2636 error( RtAudioError::WARNING );
2640 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2641 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2643 // Check if we were draining the stream and signal is finished.
2644 if ( handle->drainCounter > 3 ) {
2645 ThreadHandle threadId;
2647 stream_.state = STREAM_STOPPING;
2648 if ( handle->internalDrain == true )
// Callback-initiated stop: deactivate from a helper thread.
2649 pthread_create( &threadId, NULL, jackStopStream, info );
// User-initiated stop: wake the thread blocked in stopStream().
2651 pthread_cond_signal( &handle->condition );
2655 // Invoke user callback first, to get fresh output data.
2656 if ( handle->drainCounter == 0 ) {
2657 RtAudioCallback callback = (RtAudioCallback) info->callback;
2658 double streamTime = getStreamTime();
// Report (and clear) any xrun flags raised by jackXrun().
2659 RtAudioStreamStatus status = 0;
2660 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2661 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2662 handle->xrun[0] = false;
2664 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2665 status |= RTAUDIO_INPUT_OVERFLOW;
2666 handle->xrun[1] = false;
2668 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2669 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = stop after draining.
2670 if ( cbReturnValue == 2 ) {
2671 stream_.state = STREAM_STOPPING;
2672 handle->drainCounter = 2;
2674 pthread_create( &id, NULL, jackStopStream, info );
2677 else if ( cbReturnValue == 1 ) {
2678 handle->drainCounter = 1;
2679 handle->internalDrain = true;
2683 jack_default_audio_sample_t *jackbuffer;
2684 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2685 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2687 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2689 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2690 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2691 memset( jackbuffer, 0, bufferBytes );
2695 else if ( stream_.doConvertBuffer[0] ) {
// Convert user data into the device buffer, then copy per channel.
2697 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2699 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2700 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2701 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2704 else { // no buffer conversion
2705 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2706 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2707 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2712 // Don't bother draining input
2713 if ( handle->drainCounter ) {
2714 handle->drainCounter++;
2718 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2720 if ( stream_.doConvertBuffer[1] ) {
// Gather per-channel port data into the device buffer, then convert.
2721 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2722 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2723 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2725 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2727 else { // no buffer conversion
2728 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2729 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2730 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2736 RtApi::tickStreamTime();
2739 //******************** End of __UNIX_JACK__ *********************//
2742 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2744 // The ASIO API is designed around a callback scheme, so this
2745 // implementation is similar to that used for OS-X CoreAudio and Linux
2746 // Jack. The primary constraint with ASIO is that it only allows
2747 // access to a single driver at a time. Thus, it is not possible to
2748 // have more than one simultaneous RtAudio stream.
2750 // This implementation also requires a number of external ASIO files
2751 // and a few global variables. The ASIO callback scheme does not
2752 // allow for the passing of user data, so we must create a global
2753 // pointer to our callbackInfo structure.
2755 // On unix systems, we make use of a pthread condition variable.
2756 // Since there is no equivalent in Windows, I hacked something based
2757 // on information found in
2758 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2760 #include "asiosys.h"
2762 #include "iasiothiscallresolver.h"
2763 #include "asiodrivers.h"
// File-scope globals: the ASIO callback scheme does not allow passing
// user data (see the section comment above), so driver state and the
// callback-info pointer must live at static scope. Only one ASIO stream
// can exist at a time.
2766 static AsioDrivers drivers;
2767 static ASIOCallbacks asioCallbacks;
2768 static ASIODriverInfo driverInfo;
2769 static CallbackInfo *asioCallbackInfo;
2770 static bool asioXRun;
// Per-stream handle members (struct header elided from this fragment —
// presumably the AsioHandle analogue of JackHandle above).
2773 int drainCounter; // Tracks callback counts when draining
2774 bool internalDrain; // Indicates if stop is initiated from callback or not.
2775 ASIOBufferInfo *bufferInfos;
2779 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2782 // Function declarations (definitions at end of section)
2783 static const char* getAsioErrorString( ASIOError result );
2784 static void sampleRateChanged( ASIOSampleRate sRate );
2785 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM for this thread (ASIO requires a
// single-threaded apartment), reset the driver list and prepare the
// driver info used by ASIOInit().
2787 RtApiAsio :: RtApiAsio()
2789 // ASIO cannot run on a multi-threaded apartment. You can call
2790 // CoInitialize beforehand, but it must be for apartment threading
2791 // (in which case, CoInitialize will return S_FALSE here).
2792 coInitialized_ = false;
2793 HRESULT hr = CoInitialize( NULL );
// The FAILED(hr) guard is elided from this fragment; on failure only a
// warning is issued.
2795 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2796 error( RtAudioError::WARNING );
2798 coInitialized_ = true;
2800 drivers.removeCurrentDriver();
2801 driverInfo.asioVersion = 2;
2803 // See note in DirectSound implementation about GetDesktopWindow().
2804 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then balance the CoInitialize()
// performed in the constructor.
2807 RtApiAsio :: ~RtApiAsio()
2809 if ( stream_.state != STREAM_CLOSED ) closeStream();
2810 if ( coInitialized_ ) CoUninitialize();
// Report the number of installed ASIO drivers (each driver is one
// RtAudio device).
2813 unsigned int RtApiAsio :: getDeviceCount( void )
2815 return (unsigned int) drivers.asioGetNumDev();
// Probe ASIO driver index 'device' and fill an RtAudio::DeviceInfo with
// its channel counts, supported sample rates, native data format(s), and
// default-device flags.  Because ASIO allows only one loaded driver at a
// time, the driver is loaded, initialized, queried, and removed again.
// NOTE(review): gapped listing — the "return info;" statements that
// follow each error() call (and info.probed = true at the end) are in
// lines not visible here; code lines below are kept byte-identical.
2818 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2820 RtAudio::DeviceInfo info;
2821 info.probed = false;
// Validate the device index against the current driver count.
2824 unsigned int nDevices = getDeviceCount();
2825 if ( nDevices == 0 ) {
2826 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2827 error( RtAudioError::INVALID_USE );
2831 if ( device >= nDevices ) {
2832 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2833 error( RtAudioError::INVALID_USE );
2837 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2838 if ( stream_.state != STREAM_CLOSED ) {
2839 if ( device >= devices_.size() ) {
2840 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2841 error( RtAudioError::WARNING );
2844 return devices_[ device ];
// Look up the driver name for this index; it doubles as the device name.
2847 char driverName[32];
2848 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2849 if ( result != ASE_OK ) {
2850 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2851 errorText_ = errorStream_.str();
2852 error( RtAudioError::WARNING );
2856 info.name = driverName;
// Load and initialize the driver so it can be queried.
2858 if ( !drivers.loadDriver( driverName ) ) {
2859 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2860 errorText_ = errorStream_.str();
2861 error( RtAudioError::WARNING );
2865 result = ASIOInit( &driverInfo );
2866 if ( result != ASE_OK ) {
2867 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2868 errorText_ = errorStream_.str();
2869 error( RtAudioError::WARNING );
2873 // Determine the device channel information.
2874 long inputChannels, outputChannels;
2875 result = ASIOGetChannels( &inputChannels, &outputChannels );
2876 if ( result != ASE_OK ) {
2877 drivers.removeCurrentDriver();
2878 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2879 errorText_ = errorStream_.str();
2880 error( RtAudioError::WARNING );
2884 info.outputChannels = outputChannels;
2885 info.inputChannels = inputChannels;
// Duplex channel count is limited by the smaller of the two directions.
2886 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2887 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2889 // Determine the supported sample rates.
2890 info.sampleRates.clear();
2891 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2892 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2893 if ( result == ASE_OK ) {
2894 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate that does not exceed 48 kHz.
2896 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2897 info.preferredSampleRate = SAMPLE_RATES[i];
2901 // Determine supported data types ... just check first channel and assume rest are the same.
2902 ASIOChannelInfo channelInfo;
2903 channelInfo.channel = 0;
2904 channelInfo.isInput = true;
2905 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2906 result = ASIOGetChannelInfo( &channelInfo );
2907 if ( result != ASE_OK ) {
2908 drivers.removeCurrentDriver();
2909 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2910 errorText_ = errorStream_.str();
2911 error( RtAudioError::WARNING );
// Map the ASIO sample type to an RtAudio format flag (both endiannesses
// of each width map to the same RtAudio format; byte order is handled at
// stream-open time).
2915 info.nativeFormats = 0;
2916 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2917 info.nativeFormats |= RTAUDIO_SINT16;
2918 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2919 info.nativeFormats |= RTAUDIO_SINT32;
2920 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2921 info.nativeFormats |= RTAUDIO_FLOAT32;
2922 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2923 info.nativeFormats |= RTAUDIO_FLOAT64;
2924 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2925 info.nativeFormats |= RTAUDIO_SINT24;
2927 if ( info.outputChannels > 0 )
2928 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2929 if ( info.inputChannels > 0 )
2930 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver again — ASIO permits only one loaded driver at a time.
2933 drivers.removeCurrentDriver();
// ASIO driver callback: invoked by the driver each time a buffer half is
// ready.  Recovers the RtApiAsio instance from the file-scope
// asioCallbackInfo pointer (set in probeDeviceOpen) and forwards to its
// callbackEvent() with the ready buffer index (0 or 1).
2937 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2939 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2940 object->callbackEvent( index );
// Snapshot getDeviceInfo() results for every device into devices_.
// Called before opening a stream, because ASIO cannot probe other
// drivers while one is loaded; getDeviceInfo() serves these cached
// entries while a stream is open.
2943 void RtApiAsio :: saveDeviceInfo( void )
2947 unsigned int nDevices = getDeviceCount();
2948 devices_.resize( nDevices );
2949 for ( unsigned int i=0; i<nDevices; i++ )
2950 devices_[i] = getDeviceInfo( i );
// Open (or extend to duplex) an ASIO stream on driver index 'device'.
// Loads/initializes the driver (output leg only — a duplex input reuses
// the already-loaded driver), validates channel counts and sample rate,
// negotiates the buffer size, creates the ASIO buffers, and allocates
// the user/device conversion buffers.  Returns SUCCESS/FAILURE; on
// failure of a non-duplex-input open, the error path below unwinds all
// partially-acquired resources.
// NOTE(review): this listing is gapped — the "goto error;" statements
// after each errorText_ assignment, the error:/return labels, and
// various closing braces live on lines not visible here; visible code
// is preserved as-is except for the one-token mojibake fix flagged below.
2953 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2954 unsigned int firstChannel, unsigned int sampleRate,
2955 RtAudioFormat format, unsigned int *bufferSize,
2956 RtAudio::StreamOptions *options )
2957 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2959 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2961 // For ASIO, a duplex stream MUST use the same driver.
2962 if ( isDuplexInput && stream_.device[0] != device ) {
2963 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2967 char driverName[32];
2968 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2971 errorText_ = errorStream_.str();
2975 // Only load the driver once for duplex stream.
2976 if ( !isDuplexInput ) {
2977 // The getDeviceInfo() function will not work when a stream is open
2978 // because ASIO does not allow multiple devices to run at the same
2979 // time. Thus, we'll probe the system before opening a stream and
2980 // save the results for use by getDeviceInfo().
2981 this->saveDeviceInfo();
2983 if ( !drivers.loadDriver( driverName ) ) {
2984 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2985 errorText_ = errorStream_.str();
2989 result = ASIOInit( &driverInfo );
2990 if ( result != ASE_OK ) {
2991 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2992 errorText_ = errorStream_.str();
2997 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2998 bool buffersAllocated = false;
2999 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3000 unsigned int nChannels;
3003 // Check the device channel count.
3004 long inputChannels, outputChannels;
3005 result = ASIOGetChannels( &inputChannels, &outputChannels );
3006 if ( result != ASE_OK ) {
3007 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3008 errorText_ = errorStream_.str();
3012 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3013 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3014 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3015 errorText_ = errorStream_.str();
3018 stream_.nDeviceChannels[mode] = channels;
3019 stream_.nUserChannels[mode] = channels;
3020 stream_.channelOffset[mode] = firstChannel;
3022 // Verify the sample rate is supported.
3023 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3024 if ( result != ASE_OK ) {
3025 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3026 errorText_ = errorStream_.str();
3030 // Get the current sample rate
3031 ASIOSampleRate currentRate;
// FIX: the listing read "ASIOGetSampleRate( ¤tRate )" — HTML-entity
// mojibake of "&currentRate" ("&curr" collapsed to "¤").  Restored the
// address-of argument; ASIOGetSampleRate() takes an ASIOSampleRate*.
3032 result = ASIOGetSampleRate( &currentRate );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3035 errorText_ = errorStream_.str();
3039 // Set the sample rate only if necessary
3040 if ( currentRate != sampleRate ) {
3041 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3042 if ( result != ASE_OK ) {
3043 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3044 errorText_ = errorStream_.str();
3049 // Determine the driver data type.
3050 ASIOChannelInfo channelInfo;
3051 channelInfo.channel = 0;
3052 if ( mode == OUTPUT ) channelInfo.isInput = false;
3053 else channelInfo.isInput = true;
3054 result = ASIOGetChannelInfo( &channelInfo );
3055 if ( result != ASE_OK ) {
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3057 errorText_ = errorStream_.str();
3061 // Assuming WINDOWS host is always little-endian.
3062 stream_.doByteSwap[mode] = false;
3063 stream_.userFormat = format;
3064 stream_.deviceFormat[mode] = 0;
3065 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3066 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3067 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3069 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3070 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3071 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3073 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3074 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3075 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3077 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3078 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3079 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3081 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3082 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3083 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3086 if ( stream_.deviceFormat[mode] == 0 ) {
3087 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3088 errorText_ = errorStream_.str();
3092 // Set the buffer size. For a duplex stream, this will end up
3093 // setting the buffer size based on the input constraints, which
3095 long minSize, maxSize, preferSize, granularity;
3096 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3097 if ( result != ASE_OK ) {
3098 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3099 errorText_ = errorStream_.str();
3103 if ( isDuplexInput ) {
3104 // When this is the duplex input (output was opened before), then we have to use the same
3105 // buffersize as the output, because it might use the preferred buffer size, which most
3106 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3107 // So instead of throwing an error, make them equal. The caller uses the reference
3108 // to the "bufferSize" param as usual to set up processing buffers.
3110 *bufferSize = stream_.bufferSize;
3113 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3114 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3115 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3116 else if ( granularity == -1 ) {
3117 // Make sure bufferSize is a power of two.
3118 int log2_of_min_size = 0;
3119 int log2_of_max_size = 0;
3121 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3122 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3123 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two between min and max that is closest to the request.
3126 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3127 int min_delta_num = log2_of_min_size;
3129 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3130 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3131 if (current_delta < min_delta) {
3132 min_delta = current_delta;
3137 *bufferSize = ( (unsigned int)1 << min_delta_num );
3138 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3139 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3141 else if ( granularity != 0 ) {
3142 // Set to an even multiple of granularity, rounding up.
3143 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3148 // we don't use it anymore, see above!
3149 // Just left it here for the case...
3150 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3151 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3156 stream_.bufferSize = *bufferSize;
3157 stream_.nBuffers = 2;
3159 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3160 else stream_.userInterleaved = true;
3162 // ASIO always uses non-interleaved buffers.
3163 stream_.deviceInterleaved[mode] = false;
3165 // Allocate, if necessary, our AsioHandle structure for the stream.
3166 if ( handle == 0 ) {
3168 handle = new AsioHandle;
3170 catch ( std::bad_alloc& ) {
3171 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3174 handle->bufferInfos = 0;
3176 // Create a manual-reset event.
3177 handle->condition = CreateEvent( NULL, // no security
3178 TRUE, // manual-reset
3179 FALSE, // non-signaled initially
3181 stream_.apiHandle = (void *) handle;
3184 // Create the ASIO internal buffers. Since RtAudio sets up input
3185 // and output separately, we'll have to dispose of previously
3186 // created output buffers for a duplex stream.
3187 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3188 ASIODisposeBuffers();
3189 if ( handle->bufferInfos ) free( handle->bufferInfos );
3192 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3194 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3195 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3196 if ( handle->bufferInfos == NULL ) {
3197 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3198 errorText_ = errorStream_.str();
// Output channels are listed first, then input channels.
3202 ASIOBufferInfo *infos;
3203 infos = handle->bufferInfos;
3204 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3205 infos->isInput = ASIOFalse;
3206 infos->channelNum = i + stream_.channelOffset[0];
3207 infos->buffers[0] = infos->buffers[1] = 0;
3209 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3210 infos->isInput = ASIOTrue;
3211 infos->channelNum = i + stream_.channelOffset[1];
3212 infos->buffers[0] = infos->buffers[1] = 0;
3215 // prepare for callbacks
3216 stream_.sampleRate = sampleRate;
3217 stream_.device[mode] = device;
3218 stream_.mode = isDuplexInput ? DUPLEX : mode;
3220 // store this class instance before registering callbacks, that are going to use it
3221 asioCallbackInfo = &stream_.callbackInfo;
3222 stream_.callbackInfo.object = (void *) this;
3224 // Set up the ASIO callback structure and create the ASIO data buffers.
3225 asioCallbacks.bufferSwitch = &bufferSwitch;
3226 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3227 asioCallbacks.asioMessage = &asioMessages;
3228 asioCallbacks.bufferSwitchTimeInfo = NULL;
3229 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3230 if ( result != ASE_OK ) {
3231 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3232 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3233 // In that case, let's be naïve and try that instead.
3234 *bufferSize = preferSize;
3235 stream_.bufferSize = *bufferSize;
3236 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3239 if ( result != ASE_OK ) {
3240 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3241 errorText_ = errorStream_.str();
3244 buffersAllocated = true;
3245 stream_.state = STREAM_STOPPED;
3247 // Set flags for buffer conversion.
3248 stream_.doConvertBuffer[mode] = false;
3249 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3250 stream_.doConvertBuffer[mode] = true;
3251 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3252 stream_.nUserChannels[mode] > 1 )
3253 stream_.doConvertBuffer[mode] = true;
3255 // Allocate necessary internal buffers
3256 unsigned long bufferBytes;
3257 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3258 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3259 if ( stream_.userBuffer[mode] == NULL ) {
3260 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3264 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing (duplex) device buffer if it is already large enough.
3266 bool makeBuffer = true;
3267 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3268 if ( isDuplexInput && stream_.deviceBuffer ) {
3269 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3270 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3274 bufferBytes *= *bufferSize;
3275 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3276 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3277 if ( stream_.deviceBuffer == NULL ) {
3278 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3284 // Determine device latencies
3285 long inputLatency, outputLatency;
3286 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3287 if ( result != ASE_OK ) {
3288 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3289 errorText_ = errorStream_.str();
3290 error( RtAudioError::WARNING); // warn but don't fail
3293 stream_.latency[0] = outputLatency;
3294 stream_.latency[1] = inputLatency;
3297 // Setup the buffer conversion information structure. We don't use
3298 // buffers to do channel offsets, so we override that parameter
3300 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-unwind path (reached via the goto statements on the lines
// missing from this listing): release everything acquired above.
3305 if ( !isDuplexInput ) {
3306 // the cleanup for error in the duplex input, is done by RtApi::openStream
3307 // So we clean up for single channel only
3309 if ( buffersAllocated )
3310 ASIODisposeBuffers();
3312 drivers.removeCurrentDriver();
3315 CloseHandle( handle->condition );
3316 if ( handle->bufferInfos )
3317 free( handle->bufferInfos );
3320 stream_.apiHandle = 0;
3324 if ( stream_.userBuffer[mode] ) {
3325 free( stream_.userBuffer[mode] );
3326 stream_.userBuffer[mode] = 0;
3329 if ( stream_.deviceBuffer ) {
3330 free( stream_.deviceBuffer );
3331 stream_.deviceBuffer = 0;
3336 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tear down the open stream: stop it if running, dispose the ASIO
// buffers, unload the driver, and free the AsioHandle (event + buffer
// infos) plus the user/device conversion buffers.  Resets the stream
// state to UNINITIALIZED / STREAM_CLOSED.
// NOTE(review): gapped listing — the ASIOStop() call presumably made
// when the stream is running, the null check on 'handle', and several
// braces are on lines not visible here; code kept byte-identical.
3338 void RtApiAsio :: closeStream()
3340 if ( stream_.state == STREAM_CLOSED ) {
3341 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3342 error( RtAudioError::WARNING );
3346 if ( stream_.state == STREAM_RUNNING ) {
3347 stream_.state = STREAM_STOPPED;
3350 ASIODisposeBuffers();
3351 drivers.removeCurrentDriver();
3353 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3355 CloseHandle( handle->condition );
3356 if ( handle->bufferInfos )
3357 free( handle->bufferInfos );
3359 stream_.apiHandle = 0;
// Free both the output ([0]) and input ([1]) user buffers.
3362 for ( int i=0; i<2; i++ ) {
3363 if ( stream_.userBuffer[i] ) {
3364 free( stream_.userBuffer[i] );
3365 stream_.userBuffer[i] = 0;
3369 if ( stream_.deviceBuffer ) {
3370 free( stream_.deviceBuffer );
3371 stream_.deviceBuffer = 0;
3374 stream_.mode = UNINITIALIZED;
3375 stream_.state = STREAM_CLOSED;
// File-scope flag: guards against spawning a second asioStopStream()
// helper thread; cleared in startStream() when the stream (re)starts.
3378 bool stopThreadCalled = false;
// Start the opened ASIO stream: record the tick timestamp (when
// gettimeofday is available), call ASIOStart(), and reset the drain
// bookkeeping and the manual-reset condition event used by stopStream().
// Raises SYSTEM_ERROR if ASIOStart() fails.
3380 void RtApiAsio :: startStream()
3383 if ( stream_.state == STREAM_RUNNING ) {
3384 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3385 error( RtAudioError::WARNING );
3389 #if defined( HAVE_GETTIMEOFDAY )
3390 gettimeofday( &stream_.lastTickTimestamp, NULL );
3393 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3394 ASIOError result = ASIOStart();
3395 if ( result != ASE_OK ) {
3396 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3397 errorText_ = errorStream_.str();
// Reset drain state and the condition event before running.
3401 handle->drainCounter = 0;
3402 handle->internalDrain = false;
3403 ResetEvent( handle->condition );
3404 stream_.state = STREAM_RUNNING;
// Allow a future stop request to spawn the stop-helper thread again.
3408 stopThreadCalled = false;
3410 if ( result == ASE_OK ) return;
3411 error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream.  For output/duplex streams a drain is
// requested (drainCounter = 2) and this call blocks on the condition
// event until callbackEvent() signals that the last buffers have been
// played; then ASIOStop() is issued.  Raises SYSTEM_ERROR on failure.
3414 void RtApiAsio :: stopStream()
3417 if ( stream_.state == STREAM_STOPPED ) {
3418 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3419 error( RtAudioError::WARNING );
3423 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3424 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3425 if ( handle->drainCounter == 0 ) {
3426 handle->drainCounter = 2;
3427 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3431 stream_.state = STREAM_STOPPED;
3433 ASIOError result = ASIOStop();
3434 if ( result != ASE_OK ) {
3435 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3436 errorText_ = errorStream_.str();
3439 if ( result == ASE_OK ) return;
3440 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream.  For ASIO this is deliberately identical to
// stopStream() (see the retained comment below); the fast-abort path is
// commented out because disposing buffers without draining was observed
// to leave audible residue on some drivers.
// NOTE(review): gapped listing — the stopStream() call this function
// presumably ends with is on a line not visible here.
3443 void RtApiAsio :: abortStream()
3446 if ( stream_.state == STREAM_STOPPED ) {
3447 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3448 error( RtAudioError::WARNING );
3452 // The following lines were commented-out because some behavior was
3453 // noted where the device buffers need to be zeroed to avoid
3454 // continuing sound, even when the device buffers are completely
3455 // disposed. So now, calling abort is the same as calling stop.
3456 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3457 // handle->drainCounter = 2;
3461 // This function will be called by a spawned thread when the user
3462 // callback function signals that the stream should be stopped or
3463 // aborted. It is necessary to handle it this way because the
3464 // callbackEvent() function must return before the ASIOStop()
3465 // function will return.
// Thread entry point for _beginthreadex(); receives the stream's
// CallbackInfo and invokes stopStream() on the owning RtApiAsio.
3466 static unsigned __stdcall asioStopStream( void *ptr )
3468 CallbackInfo *info = (CallbackInfo *) ptr;
3469 RtApiAsio *object = (RtApiAsio *) info->object;
3471 object->stopStream();
// Per-buffer processing, invoked from the driver via bufferSwitch().
// Runs the user callback, handles stop/drain requests it returns, then
// copies/converts data between the user buffers and the ASIO channel
// buffers for the half identified by 'bufferIndex' (0 or 1).
// NOTE(review): gapped listing — several closing braces and the
// "unrolled" no-conversion branches' opening lines are not visible;
// code lines below are kept byte-identical.
3476 bool RtApiAsio :: callbackEvent( long bufferIndex )
3478 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3479 if ( stream_.state == STREAM_CLOSED ) {
3480 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3481 error( RtAudioError::WARNING );
3485 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3486 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3488 // Check if we were draining the stream and signal if finished.
3489 if ( handle->drainCounter > 3 ) {
3491 stream_.state = STREAM_STOPPING;
// External drain (stopStream() is blocked on the event) vs. internal
// drain (user callback returned 1): signal or spawn the stop thread.
3492 if ( handle->internalDrain == false )
3493 SetEvent( handle->condition );
3494 else { // spawn a thread to stop the stream
3496 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3497 &stream_.callbackInfo, 0, &threadId );
3502 // Invoke user callback to get fresh output data UNLESS we are
3504 if ( handle->drainCounter == 0 ) {
3505 RtAudioCallback callback = (RtAudioCallback) info->callback;
3506 double streamTime = getStreamTime();
3507 RtAudioStreamStatus status = 0;
3508 if ( stream_.mode != INPUT && asioXRun == true ) {
3509 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3512 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3513 status |= RTAUDIO_INPUT_OVERFLOW;
3516 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3517 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort now; 1 = drain then stop.
3518 if ( cbReturnValue == 2 ) {
3519 stream_.state = STREAM_STOPPING;
3520 handle->drainCounter = 2;
3522 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3523 &stream_.callbackInfo, 0, &threadId );
3526 else if ( cbReturnValue == 1 ) {
3527 handle->drainCounter = 1;
3528 handle->internalDrain = true;
3532 unsigned int nChannels, bufferBytes, i, j;
3533 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3534 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3536 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3538 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3540 for ( i=0, j=0; i<nChannels; i++ ) {
3541 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3542 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3546 else if ( stream_.doConvertBuffer[0] ) {
// Convert (and possibly byte-swap) into the device buffer, then
// scatter the de-interleaved channels into the ASIO output buffers.
3548 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3549 if ( stream_.doByteSwap[0] )
3550 byteSwapBuffer( stream_.deviceBuffer,
3551 stream_.bufferSize * stream_.nDeviceChannels[0],
3552 stream_.deviceFormat[0] );
3554 for ( i=0, j=0; i<nChannels; i++ ) {
3555 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3556 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3557 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No-conversion path: copy straight from the user buffer.
3563 if ( stream_.doByteSwap[0] )
3564 byteSwapBuffer( stream_.userBuffer[0],
3565 stream_.bufferSize * stream_.nUserChannels[0],
3566 stream_.userFormat );
3568 for ( i=0, j=0; i<nChannels; i++ ) {
3569 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3570 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3571 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3577 // Don't bother draining input
3578 if ( handle->drainCounter ) {
3579 handle->drainCounter++;
3583 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3585 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3587 if (stream_.doConvertBuffer[1]) {
3589 // Always interleave ASIO input data.
3590 for ( i=0, j=0; i<nChannels; i++ ) {
3591 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3592 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3593 handle->bufferInfos[i].buffers[bufferIndex],
3597 if ( stream_.doByteSwap[1] )
3598 byteSwapBuffer( stream_.deviceBuffer,
3599 stream_.bufferSize * stream_.nDeviceChannels[1],
3600 stream_.deviceFormat[1] );
3601 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No-conversion path: copy channels directly into the user buffer.
3605 for ( i=0, j=0; i<nChannels; i++ ) {
3606 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3607 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3608 handle->bufferInfos[i].buffers[bufferIndex],
3613 if ( stream_.doByteSwap[1] )
3614 byteSwapBuffer( stream_.userBuffer[1],
3615 stream_.bufferSize * stream_.nUserChannels[1],
3616 stream_.userFormat );
3621 // The following call was suggested by Malte Clasen. While the API
3622 // documentation indicates it should not be required, some device
3623 // drivers apparently do not function correctly without it.
3626 RtApi::tickStreamTime();
// ASIO driver callback: the driver reports a sample-rate change.  The
// stream is stopped (catching and reporting any RtAudioError) and the
// user is notified on stderr, since RtAudio cannot transparently follow
// an external rate change.
3630 static void sampleRateChanged( ASIOSampleRate sRate )
3632 // The ASIO documentation says that this usually only happens during
3633 // external sync. Audio processing is not stopped by the driver,
3634 // actual sample rate might not have even changed, maybe only the
3635 // sample rate status of an AES/EBU or S/PDIF digital input at the
3638 RtApi *object = (RtApi *) asioCallbackInfo->object;
3640 object->stopStream();
3642 catch ( RtAudioError &exception ) {
3643 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3647 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver message handler (asioCallbacks.asioMessage).  Answers the
// driver's capability queries and notifications; a nonzero return means
// "message understood/supported".
// NOTE(review): gapped listing — the "ret = 1;" / "break;" statements
// and the final return are on lines not visible here; the case logic
// below is kept byte-identical.
3650 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3654 switch( selector ) {
3655 case kAsioSelectorSupported:
3656 if ( value == kAsioResetRequest
3657 || value == kAsioEngineVersion
3658 || value == kAsioResyncRequest
3659 || value == kAsioLatenciesChanged
3660 // The following three were added for ASIO 2.0, you don't
3661 // necessarily have to support them.
3662 || value == kAsioSupportsTimeInfo
3663 || value == kAsioSupportsTimeCode
3664 || value == kAsioSupportsInputMonitor)
3667 case kAsioResetRequest:
3668 // Defer the task and perform the reset of the driver during the
3669 // next "safe" situation. You cannot reset the driver right now,
3670 // as this code is called from the driver. Reset the driver is
3671 // done by completely destruct is. I.e. ASIOStop(),
3672 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3674 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3677 case kAsioResyncRequest:
3678 // This informs the application that the driver encountered some
3679 // non-fatal data loss. It is used for synchronization purposes
3680 // of different media. Added mainly to work around the Win16Mutex
3681 // problems in Windows 95/98 with the Windows Multimedia system,
3682 // which could lose data because the Mutex was held too long by
3683 // another thread. However a driver can issue it in other
3685 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3689 case kAsioLatenciesChanged:
3690 // This will inform the host application that the drivers were
3691 // latencies changed. Beware, it this does not mean that the
3692 // buffer sizes have changed! You might need to update internal
3694 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3697 case kAsioEngineVersion:
3698 // Return the supported ASIO version of the host application. If
3699 // a host application does not implement this selector, ASIO 1.0
3700 // is assumed by the driver.
3703 case kAsioSupportsTimeInfo:
3704 // Informs the driver whether the
3705 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3706 // For compatibility with ASIO 1.0 drivers the host application
3707 // should always support the "old" bufferSwitch method, too.
3710 case kAsioSupportsTimeCode:
3711 // Informs the driver whether application is interested in time
3712 // code info. If an application does not need to know about time
3713 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message via a small static
// lookup table; unknown codes yield "Unknown error.".
// NOTE(review): gapped listing — the local 'Messages' struct declaration
// (value/message pair) sits on lines not visible here.
3720 static const char* getAsioErrorString( ASIOError result )
3728 static const Messages m[] =
3730 { ASE_NotPresent, "Hardware input or output is not present or available." },
3731 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3732 { ASE_InvalidParameter, "Invalid input parameter." },
3733 { ASE_InvalidMode, "Invalid mode." },
3734 { ASE_SPNotAdvancing, "Sample position not advancing." },
3735 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3736 { ASE_NoMemory, "Not enough memory to complete the request." }
3739 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3740 if ( m[i].value == result ) return m[i].message;
3742 return "Unknown error.";
3745 //******************** End of __WINDOWS_ASIO__ *********************//
3749 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3751 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3752 // - Introduces support for the Windows WASAPI API
3753 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3754 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3755 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3762 #include <mferror.h>
3764 #include <mftransform.h>
3765 #include <wmcodecdsp.h>
3767 #include <audioclient.h>
3769 #include <mmdeviceapi.h>
3770 #include <functiondiscoverykeys_devpkey.h>
3772 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3773 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3776 #ifndef MFSTARTUP_NOSOCKET
3777 #define MFSTARTUP_NOSOCKET 0x1
3781 #pragma comment( lib, "ksuser" )
3782 #pragma comment( lib, "mfplat.lib" )
3783 #pragma comment( lib, "mfuuid.lib" )
3784 #pragma comment( lib, "wmcodecdspuuid" )
3787 //=============================================================================
3789 #define SAFE_RELEASE( objectPtr )\
3792 objectPtr->Release();\
3796 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3798 //-----------------------------------------------------------------------------
3800 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3801 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3802 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3803 // provide intermediate storage for read / write synchronization.
3817 // sets the length of the internal ring buffer
3818 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3821 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3823 bufferSize_ = bufferSize;
3828 // attempt to push a buffer into the ring buffer at the current "in" index
3829 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3831 if ( !buffer || // incoming buffer is NULL
3832 bufferSize == 0 || // incoming buffer has no data
3833 bufferSize > bufferSize_ ) // incoming buffer too large
3838 unsigned int relOutIndex = outIndex_;
3839 unsigned int inIndexEnd = inIndex_ + bufferSize;
3840 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3841 relOutIndex += bufferSize_;
3844 // "in" index can end on the "out" index but cannot begin at it
3845 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3846 return false; // not enough space between "in" index and "out" index
3849 // copy buffer from external to internal
3850 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3851 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3852 int fromInSize = bufferSize - fromZeroSize;
3857 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3858 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3860 case RTAUDIO_SINT16:
3861 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3862 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3864 case RTAUDIO_SINT24:
3865 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3866 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3868 case RTAUDIO_SINT32:
3869 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3870 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3872 case RTAUDIO_FLOAT32:
3873 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3874 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3876 case RTAUDIO_FLOAT64:
3877 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3878 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3882 // update "in" index
3883 inIndex_ += bufferSize;
3884 inIndex_ %= bufferSize_;
3889 // attempt to pull a buffer from the ring buffer from the current "out" index
3890 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3892 if ( !buffer || // incoming buffer is NULL
3893 bufferSize == 0 || // incoming buffer has no data
3894 bufferSize > bufferSize_ ) // incoming buffer too large
3899 unsigned int relInIndex = inIndex_;
3900 unsigned int outIndexEnd = outIndex_ + bufferSize;
3901 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3902 relInIndex += bufferSize_;
3905 // "out" index can begin at and end on the "in" index
3906 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3907 return false; // not enough space between "out" index and "in" index
3910 // copy buffer from internal to external
3911 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3912 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3913 int fromOutSize = bufferSize - fromZeroSize;
3918 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3919 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3921 case RTAUDIO_SINT16:
3922 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3923 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3925 case RTAUDIO_SINT24:
3926 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3927 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3929 case RTAUDIO_SINT32:
3930 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3931 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3933 case RTAUDIO_FLOAT32:
3934 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3935 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3937 case RTAUDIO_FLOAT64:
3938 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3939 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3943 // update "out" index
3944 outIndex_ += bufferSize;
3945 outIndex_ %= bufferSize_;
// total ring buffer capacity, in samples of the stream's sample format
3952 unsigned int bufferSize_;
// next write position ("in" index) into the ring buffer
3953 unsigned int inIndex_;
// next read position ("out" index) into the ring buffer
3954 unsigned int outIndex_;
3957 //-----------------------------------------------------------------------------
3959 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3960 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3961 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3962 class WasapiResampler
3965 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3966 unsigned int inSampleRate, unsigned int outSampleRate )
3967 : _bytesPerSample( bitsPerSample / 8 )
3968 , _channelCount( channelCount )
3969 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3970 , _transformUnk( NULL )
3971 , _transform( NULL )
3972 , _mediaType( NULL )
3973 , _inputMediaType( NULL )
3974 , _outputMediaType( NULL )
3976 #ifdef __IWMResamplerProps_FWD_DEFINED__
3977 , _resamplerProps( NULL )
3980 // 1. Initialization
3982 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3984 // 2. Create Resampler Transform Object
3986 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3987 IID_IUnknown, ( void** ) &_transformUnk );
3989 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3991 #ifdef __IWMResamplerProps_FWD_DEFINED__
3992 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3993 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3996 // 3. Specify input / output format
3998 MFCreateMediaType( &_mediaType );
3999 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4000 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4001 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4002 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4003 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4004 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4005 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4006 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4008 MFCreateMediaType( &_inputMediaType );
4009 _mediaType->CopyAllItems( _inputMediaType );
4011 _transform->SetInputType( 0, _inputMediaType, 0 );
4013 MFCreateMediaType( &_outputMediaType );
4014 _mediaType->CopyAllItems( _outputMediaType );
4016 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4017 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4019 _transform->SetOutputType( 0, _outputMediaType, 0 );
4021 // 4. Send stream start messages to Resampler
4023 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4024 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4025 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4030 // 8. Send stream stop messages to Resampler
4032 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4033 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4039 SAFE_RELEASE( _transformUnk );
4040 SAFE_RELEASE( _transform );
4041 SAFE_RELEASE( _mediaType );
4042 SAFE_RELEASE( _inputMediaType );
4043 SAFE_RELEASE( _outputMediaType );
4045 #ifdef __IWMResamplerProps_FWD_DEFINED__
4046 SAFE_RELEASE( _resamplerProps );
4050 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4052 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4053 if ( _sampleRatio == 1 )
4055 // no sample rate conversion required
4056 memcpy( outBuffer, inBuffer, inputBufferSize );
4057 outSampleCount = inSampleCount;
4061 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4063 IMFMediaBuffer* rInBuffer;
4064 IMFSample* rInSample;
4065 BYTE* rInByteBuffer = NULL;
4067 // 5. Create Sample object from input data
4069 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4071 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4072 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4073 rInBuffer->Unlock();
4074 rInByteBuffer = NULL;
4076 rInBuffer->SetCurrentLength( inputBufferSize );
4078 MFCreateSample( &rInSample );
4079 rInSample->AddBuffer( rInBuffer );
4081 // 6. Pass input data to Resampler
4083 _transform->ProcessInput( 0, rInSample, 0 );
4085 SAFE_RELEASE( rInBuffer );
4086 SAFE_RELEASE( rInSample );
4088 // 7. Perform sample rate conversion
4090 IMFMediaBuffer* rOutBuffer = NULL;
4091 BYTE* rOutByteBuffer = NULL;
4093 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4095 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4097 // 7.1 Create Sample object for output data
4099 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4100 MFCreateSample( &( rOutDataBuffer.pSample ) );
4101 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4102 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4103 rOutDataBuffer.dwStreamID = 0;
4104 rOutDataBuffer.dwStatus = 0;
4105 rOutDataBuffer.pEvents = NULL;
4107 // 7.2 Get output data from Resampler
4109 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4112 SAFE_RELEASE( rOutBuffer );
4113 SAFE_RELEASE( rOutDataBuffer.pSample );
4117 // 7.3 Write output data to outBuffer
4119 SAFE_RELEASE( rOutBuffer );
4120 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4121 rOutBuffer->GetCurrentLength( &rBytes );
4123 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4124 memcpy( outBuffer, rOutByteBuffer, rBytes );
4125 rOutBuffer->Unlock();
4126 rOutByteBuffer = NULL;
4128 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4129 SAFE_RELEASE( rOutBuffer );
4130 SAFE_RELEASE( rOutDataBuffer.pSample );
4134 unsigned int _bytesPerSample;
4135 unsigned int _channelCount;
4138 IUnknown* _transformUnk;
4139 IMFTransform* _transform;
4140 IMFMediaType* _mediaType;
4141 IMFMediaType* _inputMediaType;
4142 IMFMediaType* _outputMediaType;
4144 #ifdef __IWMResamplerProps_FWD_DEFINED__
4145 IWMResamplerProps* _resamplerProps;
4149 //-----------------------------------------------------------------------------
4151 // A structure to hold various information related to the WASAPI implementation.
4154 IAudioClient* captureAudioClient;
4155 IAudioClient* renderAudioClient;
4156 IAudioCaptureClient* captureClient;
4157 IAudioRenderClient* renderClient;
4158 HANDLE captureEvent;
4162 : captureAudioClient( NULL ),
4163 renderAudioClient( NULL ),
4164 captureClient( NULL ),
4165 renderClient( NULL ),
4166 captureEvent( NULL ),
4167 renderEvent( NULL ) {}
4170 //=============================================================================
4172 RtApiWasapi::RtApiWasapi()
4173 : coInitialized_( false ), deviceEnumerator_( NULL )
4175 // WASAPI can run either apartment or multi-threaded
4176 HRESULT hr = CoInitialize( NULL );
4177 if ( !FAILED( hr ) )
4178 coInitialized_ = true;
4180 // Instantiate device enumerator
4181 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4182 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4183 ( void** ) &deviceEnumerator_ );
4185 // If this runs on an old Windows, it will fail. Ignore and proceed.
4187 deviceEnumerator_ = NULL;
4190 //-----------------------------------------------------------------------------
4192 RtApiWasapi::~RtApiWasapi()
4194 if ( stream_.state != STREAM_CLOSED )
4197 SAFE_RELEASE( deviceEnumerator_ );
4199 // If this object previously called CoInitialize()
4200 if ( coInitialized_ )
4204 //=============================================================================
4206 unsigned int RtApiWasapi::getDeviceCount( void )
4208 unsigned int captureDeviceCount = 0;
4209 unsigned int renderDeviceCount = 0;
4211 IMMDeviceCollection* captureDevices = NULL;
4212 IMMDeviceCollection* renderDevices = NULL;
4214 if ( !deviceEnumerator_ )
4217 // Count capture devices
4219 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4220 if ( FAILED( hr ) ) {
4221 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4225 hr = captureDevices->GetCount( &captureDeviceCount );
4226 if ( FAILED( hr ) ) {
4227 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4231 // Count render devices
4232 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4233 if ( FAILED( hr ) ) {
4234 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4238 hr = renderDevices->GetCount( &renderDeviceCount );
4239 if ( FAILED( hr ) ) {
4240 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4245 // release all references
4246 SAFE_RELEASE( captureDevices );
4247 SAFE_RELEASE( renderDevices );
4249 if ( errorText_.empty() )
4250 return captureDeviceCount + renderDeviceCount;
4252 error( RtAudioError::DRIVER_ERROR );
4256 //-----------------------------------------------------------------------------
4258 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4260 RtAudio::DeviceInfo info;
4261 unsigned int captureDeviceCount = 0;
4262 unsigned int renderDeviceCount = 0;
4263 std::string defaultDeviceName;
4264 bool isCaptureDevice = false;
4266 PROPVARIANT deviceNameProp;
4267 PROPVARIANT defaultDeviceNameProp;
4269 IMMDeviceCollection* captureDevices = NULL;
4270 IMMDeviceCollection* renderDevices = NULL;
4271 IMMDevice* devicePtr = NULL;
4272 IMMDevice* defaultDevicePtr = NULL;
4273 IAudioClient* audioClient = NULL;
4274 IPropertyStore* devicePropStore = NULL;
4275 IPropertyStore* defaultDevicePropStore = NULL;
4277 WAVEFORMATEX* deviceFormat = NULL;
4278 WAVEFORMATEX* closestMatchFormat = NULL;
4281 info.probed = false;
4283 // Count capture devices
4285 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4286 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4287 if ( FAILED( hr ) ) {
4288 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4292 hr = captureDevices->GetCount( &captureDeviceCount );
4293 if ( FAILED( hr ) ) {
4294 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4298 // Count render devices
4299 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4300 if ( FAILED( hr ) ) {
4301 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4305 hr = renderDevices->GetCount( &renderDeviceCount );
4306 if ( FAILED( hr ) ) {
4307 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4311 // validate device index
4312 if ( device >= captureDeviceCount + renderDeviceCount ) {
4313 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4314 errorType = RtAudioError::INVALID_USE;
4318 // determine whether index falls within capture or render devices
4319 if ( device >= renderDeviceCount ) {
4320 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4321 if ( FAILED( hr ) ) {
4322 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4325 isCaptureDevice = true;
4328 hr = renderDevices->Item( device, &devicePtr );
4329 if ( FAILED( hr ) ) {
4330 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4333 isCaptureDevice = false;
4336 // get default device name
4337 if ( isCaptureDevice ) {
4338 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4339 if ( FAILED( hr ) ) {
4340 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4345 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4346 if ( FAILED( hr ) ) {
4347 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4352 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4353 if ( FAILED( hr ) ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4357 PropVariantInit( &defaultDeviceNameProp );
4359 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4360 if ( FAILED( hr ) ) {
4361 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4365 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4368 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4369 if ( FAILED( hr ) ) {
4370 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4374 PropVariantInit( &deviceNameProp );
4376 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4377 if ( FAILED( hr ) ) {
4378 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4382 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4385 if ( isCaptureDevice ) {
4386 info.isDefaultInput = info.name == defaultDeviceName;
4387 info.isDefaultOutput = false;
4390 info.isDefaultInput = false;
4391 info.isDefaultOutput = info.name == defaultDeviceName;
4395 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4396 if ( FAILED( hr ) ) {
4397 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4401 hr = audioClient->GetMixFormat( &deviceFormat );
4402 if ( FAILED( hr ) ) {
4403 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4407 if ( isCaptureDevice ) {
4408 info.inputChannels = deviceFormat->nChannels;
4409 info.outputChannels = 0;
4410 info.duplexChannels = 0;
4413 info.inputChannels = 0;
4414 info.outputChannels = deviceFormat->nChannels;
4415 info.duplexChannels = 0;
4419 info.sampleRates.clear();
4421 // allow support for all sample rates as we have a built-in sample rate converter
4422 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4423 info.sampleRates.push_back( SAMPLE_RATES[i] );
4425 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4428 info.nativeFormats = 0;
4430 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4431 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4432 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4434 if ( deviceFormat->wBitsPerSample == 32 ) {
4435 info.nativeFormats |= RTAUDIO_FLOAT32;
4437 else if ( deviceFormat->wBitsPerSample == 64 ) {
4438 info.nativeFormats |= RTAUDIO_FLOAT64;
4441 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4442 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4443 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4445 if ( deviceFormat->wBitsPerSample == 8 ) {
4446 info.nativeFormats |= RTAUDIO_SINT8;
4448 else if ( deviceFormat->wBitsPerSample == 16 ) {
4449 info.nativeFormats |= RTAUDIO_SINT16;
4451 else if ( deviceFormat->wBitsPerSample == 24 ) {
4452 info.nativeFormats |= RTAUDIO_SINT24;
4454 else if ( deviceFormat->wBitsPerSample == 32 ) {
4455 info.nativeFormats |= RTAUDIO_SINT32;
4463 // release all references
4464 PropVariantClear( &deviceNameProp );
4465 PropVariantClear( &defaultDeviceNameProp );
4467 SAFE_RELEASE( captureDevices );
4468 SAFE_RELEASE( renderDevices );
4469 SAFE_RELEASE( devicePtr );
4470 SAFE_RELEASE( defaultDevicePtr );
4471 SAFE_RELEASE( audioClient );
4472 SAFE_RELEASE( devicePropStore );
4473 SAFE_RELEASE( defaultDevicePropStore );
4475 CoTaskMemFree( deviceFormat );
4476 CoTaskMemFree( closestMatchFormat );
4478 if ( !errorText_.empty() )
4483 //-----------------------------------------------------------------------------
4485 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4487 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4488 if ( getDeviceInfo( i ).isDefaultOutput ) {
4496 //-----------------------------------------------------------------------------
4498 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4500 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4501 if ( getDeviceInfo( i ).isDefaultInput ) {
4509 //-----------------------------------------------------------------------------
4511 void RtApiWasapi::closeStream( void )
4513 if ( stream_.state == STREAM_CLOSED ) {
4514 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4515 error( RtAudioError::WARNING );
4519 if ( stream_.state != STREAM_STOPPED )
4522 // clean up stream memory
4523 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4524 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4526 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4527 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4529 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4530 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4532 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4533 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4535 delete ( WasapiHandle* ) stream_.apiHandle;
4536 stream_.apiHandle = NULL;
4538 for ( int i = 0; i < 2; i++ ) {
4539 if ( stream_.userBuffer[i] ) {
4540 free( stream_.userBuffer[i] );
4541 stream_.userBuffer[i] = 0;
4545 if ( stream_.deviceBuffer ) {
4546 free( stream_.deviceBuffer );
4547 stream_.deviceBuffer = 0;
4550 // update stream state
4551 stream_.state = STREAM_CLOSED;
4554 //-----------------------------------------------------------------------------
4556 void RtApiWasapi::startStream( void )
4560 if ( stream_.state == STREAM_RUNNING ) {
4561 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4562 error( RtAudioError::WARNING );
4566 #if defined( HAVE_GETTIMEOFDAY )
4567 gettimeofday( &stream_.lastTickTimestamp, NULL );
4570 // update stream state
4571 stream_.state = STREAM_RUNNING;
4573 // create WASAPI stream thread
4574 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4576 if ( !stream_.callbackInfo.thread ) {
4577 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4578 error( RtAudioError::THREAD_ERROR );
4581 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4582 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4586 //-----------------------------------------------------------------------------
4588 void RtApiWasapi::stopStream( void )
4592 if ( stream_.state == STREAM_STOPPED ) {
4593 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4594 error( RtAudioError::WARNING );
4598 // inform stream thread by setting stream state to STREAM_STOPPING
4599 stream_.state = STREAM_STOPPING;
4601 // wait until stream thread is stopped
4602 while( stream_.state != STREAM_STOPPED ) {
4606 // Wait for the last buffer to play before stopping.
4607 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4609 // stop capture client if applicable
4610 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4611 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4612 if ( FAILED( hr ) ) {
4613 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4614 error( RtAudioError::DRIVER_ERROR );
4619 // stop render client if applicable
4620 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4621 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4622 if ( FAILED( hr ) ) {
4623 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4624 error( RtAudioError::DRIVER_ERROR );
4629 // close thread handle
4630 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4631 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4632 error( RtAudioError::THREAD_ERROR );
4636 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4639 //-----------------------------------------------------------------------------
4641 void RtApiWasapi::abortStream( void )
4645 if ( stream_.state == STREAM_STOPPED ) {
4646 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4647 error( RtAudioError::WARNING );
4651 // inform stream thread by setting stream state to STREAM_STOPPING
4652 stream_.state = STREAM_STOPPING;
4654 // wait until stream thread is stopped
4655 while ( stream_.state != STREAM_STOPPED ) {
4659 // stop capture client if applicable
4660 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4661 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4662 if ( FAILED( hr ) ) {
4663 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4664 error( RtAudioError::DRIVER_ERROR );
4669 // stop render client if applicable
4670 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4671 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4672 if ( FAILED( hr ) ) {
4673 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4674 error( RtAudioError::DRIVER_ERROR );
4679 // close thread handle
4680 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4681 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4682 error( RtAudioError::THREAD_ERROR );
4686 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4689 //-----------------------------------------------------------------------------
4691 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4692 unsigned int firstChannel, unsigned int sampleRate,
4693 RtAudioFormat format, unsigned int* bufferSize,
4694 RtAudio::StreamOptions* options )
4696 bool methodResult = FAILURE;
4697 unsigned int captureDeviceCount = 0;
4698 unsigned int renderDeviceCount = 0;
4700 IMMDeviceCollection* captureDevices = NULL;
4701 IMMDeviceCollection* renderDevices = NULL;
4702 IMMDevice* devicePtr = NULL;
4703 WAVEFORMATEX* deviceFormat = NULL;
4704 unsigned int bufferBytes;
4705 stream_.state = STREAM_STOPPED;
4707 // create API Handle if not already created
4708 if ( !stream_.apiHandle )
4709 stream_.apiHandle = ( void* ) new WasapiHandle();
4711 // Count capture devices
4713 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4714 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4715 if ( FAILED( hr ) ) {
4716 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4720 hr = captureDevices->GetCount( &captureDeviceCount );
4721 if ( FAILED( hr ) ) {
4722 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4726 // Count render devices
4727 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4728 if ( FAILED( hr ) ) {
4729 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4733 hr = renderDevices->GetCount( &renderDeviceCount );
4734 if ( FAILED( hr ) ) {
4735 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4739 // validate device index
4740 if ( device >= captureDeviceCount + renderDeviceCount ) {
4741 errorType = RtAudioError::INVALID_USE;
4742 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4746 // if device index falls within capture devices
4747 if ( device >= renderDeviceCount ) {
4748 if ( mode != INPUT ) {
4749 errorType = RtAudioError::INVALID_USE;
4750 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4754 // retrieve captureAudioClient from devicePtr
4755 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4757 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4758 if ( FAILED( hr ) ) {
4759 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4763 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4764 NULL, ( void** ) &captureAudioClient );
4765 if ( FAILED( hr ) ) {
4766 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4770 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4771 if ( FAILED( hr ) ) {
4772 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4776 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4777 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4780 // if device index falls within render devices and is configured for loopback
4781 if ( device < renderDeviceCount && mode == INPUT )
4783 // if renderAudioClient is not initialised, initialise it now
4784 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4785 if ( !renderAudioClient )
4787 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4790 // retrieve captureAudioClient from devicePtr
4791 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4793 hr = renderDevices->Item( device, &devicePtr );
4794 if ( FAILED( hr ) ) {
4795 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4799 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4800 NULL, ( void** ) &captureAudioClient );
4801 if ( FAILED( hr ) ) {
4802 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4806 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4807 if ( FAILED( hr ) ) {
4808 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4812 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4813 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4816 // if device index falls within render devices and is configured for output
4817 if ( device < renderDeviceCount && mode == OUTPUT )
4819 // if renderAudioClient is already initialised, don't initialise it again
4820 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4821 if ( renderAudioClient )
4823 methodResult = SUCCESS;
4827 hr = renderDevices->Item( device, &devicePtr );
4828 if ( FAILED( hr ) ) {
4829 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4833 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4834 NULL, ( void** ) &renderAudioClient );
4835 if ( FAILED( hr ) ) {
4836 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4840 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4841 if ( FAILED( hr ) ) {
4842 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4846 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4847 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4851 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4852 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4853 stream_.mode = DUPLEX;
4856 stream_.mode = mode;
4859 stream_.device[mode] = device;
4860 stream_.doByteSwap[mode] = false;
4861 stream_.sampleRate = sampleRate;
4862 stream_.bufferSize = *bufferSize;
4863 stream_.nBuffers = 1;
4864 stream_.nUserChannels[mode] = channels;
4865 stream_.channelOffset[mode] = firstChannel;
4866 stream_.userFormat = format;
4867 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4869 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4870 stream_.userInterleaved = false;
4872 stream_.userInterleaved = true;
4873 stream_.deviceInterleaved[mode] = true;
4875 // Set flags for buffer conversion.
4876 stream_.doConvertBuffer[mode] = false;
4877 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4878 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4879 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4880 stream_.doConvertBuffer[mode] = true;
4881 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4882 stream_.nUserChannels[mode] > 1 )
4883 stream_.doConvertBuffer[mode] = true;
4885 if ( stream_.doConvertBuffer[mode] )
4886 setConvertInfo( mode, 0 );
4888 // Allocate necessary internal buffers
4889 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4891 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4892 if ( !stream_.userBuffer[mode] ) {
4893 errorType = RtAudioError::MEMORY_ERROR;
4894 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4898 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4899 stream_.callbackInfo.priority = 15;
4901 stream_.callbackInfo.priority = 0;
4903 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4904 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4906 methodResult = SUCCESS;
4910 SAFE_RELEASE( captureDevices );
4911 SAFE_RELEASE( renderDevices );
4912 SAFE_RELEASE( devicePtr );
4913 CoTaskMemFree( deviceFormat );
4915 // if method failed, close the stream
4916 if ( methodResult == FAILURE )
4919 if ( !errorText_.empty() )
4921 return methodResult;
4924 //=============================================================================
// Thread entry trampoline passed to CreateThread: unwraps the RtApiWasapi
// instance pointer and runs the stream-processing loop on the new thread.
// NOTE(review): the function's return statement/closing brace are elided in
// this view of the file.
4926 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4929 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Helper thread entry point: requests a graceful stop of the stream. Spawned
// from wasapiThread when the user callback returns 1, so that stopStream()
// (which joins the audio thread) is not called from the audio thread itself.
4934 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4937 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Helper thread entry point: requests an immediate abort of the stream.
// Spawned from wasapiThread when the user callback returns 2 (abort), again
// to avoid calling a joining function from the audio thread itself.
4942 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4945 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4950 //-----------------------------------------------------------------------------
// Stream-processing loop, run on its own thread (started via runWasapiThread).
// Each iteration: (1) pulls captured frames from captureBuffer, resamples and
// format-converts them into the user input buffer; (2) invokes the user
// callback and handles its stop/abort return codes; (3) converts/resamples the
// user output buffer and pushes it into renderBuffer; (4) exchanges raw data
// with the WASAPI capture/render clients, blocking on the corresponding event
// handles when no work is pending. Exits when stream_.state becomes
// STREAM_STOPPING, releases thread-local resources, and marks the stream
// STREAM_STOPPED.
// NOTE(review): this view of the file elides a number of original lines
// (error-handling exits, closing braces, some call arguments); the comments
// below describe only what is visible here.
4952 void RtApiWasapi::wasapiThread()
4954 // as this is a new thread, we must CoInitialize it
4955 CoInitialize( NULL );
// Cache COM interface pointers and event handles out of the shared
// WasapiHandle; capture/render clients may still be NULL on first entry and
// are created below on demand.
4959 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4960 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4961 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4962 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4963 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4964 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4966 WAVEFORMATEX* captureFormat = NULL;
4967 WAVEFORMATEX* renderFormat = NULL;
4968 float captureSrRatio = 0.0f;
4969 float renderSrRatio = 0.0f;
4970 WasapiBuffer captureBuffer;
4971 WasapiBuffer renderBuffer;
4972 WasapiResampler* captureResampler = NULL;
4973 WasapiResampler* renderResampler = NULL;
4975 // declare local stream variables
4976 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4977 BYTE* streamBuffer = NULL;
4978 unsigned long captureFlags = 0;
4979 unsigned int bufferFrameCount = 0;
4980 unsigned int numFramesPadding = 0;
4981 unsigned int convBufferSize = 0;
// Loopback mode: capturing what is being rendered on the same device index.
4982 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4983 bool callbackPushed = true;
4984 bool callbackPulled = false;
4985 bool callbackStopped = false;
4986 int callbackResult = 0;
4988 // convBuffer is used to store converted buffers between WASAPI and the user
// NOTE: convBuffSize (bytes allocated for convBuffer) is distinct from the
// similarly named convBufferSize above (frames accumulated so far) -- easy to
// confuse when reading this function.
4989 char* convBuffer = NULL;
4990 unsigned int convBuffSize = 0;
4991 unsigned int deviceBuffSize = 0;
4993 std::string errorText;
4994 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4996 // Attempt to assign "Pro Audio" characteristic to thread
4997 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4999 DWORD taskIndex = 0;
5000 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
// NOTE(review): AvrtDll and the GetProcAddress result are used unchecked in
// the visible lines; the null-check guard appears to be elided from this view
// -- confirm against the upstream source before assuming a defect.
5001 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
5002 FreeLibrary( AvrtDll );
5005 // start capture stream if applicable
5006 if ( captureAudioClient ) {
5007 hr = captureAudioClient->GetMixFormat( &captureFormat );
5008 if ( FAILED( hr ) ) {
5009 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5013 // init captureResampler
// Resampler converts from the device mix rate to the user-requested rate.
5014 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
5015 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5016 captureFormat->nSamplesPerSec, stream_.sampleRate );
5018 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
// First entry for this stream: create and configure the capture client.
5020 if ( !captureClient ) {
5021 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5022 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5027 if ( FAILED( hr ) ) {
5028 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5032 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5033 ( void** ) &captureClient );
5034 if ( FAILED( hr ) ) {
5035 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5039 // don't configure captureEvent if in loopback mode
5040 if ( !loopbackEnabled )
5042 // configure captureEvent to trigger on every available capture buffer
5043 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5044 if ( !captureEvent ) {
5045 errorType = RtAudioError::SYSTEM_ERROR;
5046 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5050 hr = captureAudioClient->SetEventHandle( captureEvent );
5051 if ( FAILED( hr ) ) {
5052 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Publish the newly created objects back into the shared handle so that
// closeStream()/stopStream() can release them.
5056 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5059 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5062 unsigned int inBufferSize = 0;
5063 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5064 if ( FAILED( hr ) ) {
5065 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5069 // scale outBufferSize according to stream->user sample rate ratio
5070 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5071 inBufferSize *= stream_.nDeviceChannels[INPUT];
5073 // set captureBuffer size
5074 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5076 // reset the capture stream
5077 hr = captureAudioClient->Reset();
5078 if ( FAILED( hr ) ) {
5079 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5083 // start the capture stream
5084 hr = captureAudioClient->Start();
5085 if ( FAILED( hr ) ) {
5086 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5091 // start render stream if applicable
// Mirror of the capture setup above, for the render (output) side.
5092 if ( renderAudioClient ) {
5093 hr = renderAudioClient->GetMixFormat( &renderFormat );
5094 if ( FAILED( hr ) ) {
5095 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5099 // init renderResampler
// Resampler converts from the user-requested rate to the device mix rate.
5100 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5101 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5102 stream_.sampleRate, renderFormat->nSamplesPerSec );
5104 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5106 if ( !renderClient ) {
5107 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5108 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5113 if ( FAILED( hr ) ) {
5114 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5118 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5119 ( void** ) &renderClient );
5120 if ( FAILED( hr ) ) {
5121 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5125 // configure renderEvent to trigger on every available render buffer
5126 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5127 if ( !renderEvent ) {
5128 errorType = RtAudioError::SYSTEM_ERROR;
5129 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5133 hr = renderAudioClient->SetEventHandle( renderEvent );
5134 if ( FAILED( hr ) ) {
5135 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5139 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5140 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5143 unsigned int outBufferSize = 0;
5144 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5145 if ( FAILED( hr ) ) {
5146 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5150 // scale inBufferSize according to user->stream sample rate ratio
5151 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5152 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5154 // set renderBuffer size
5155 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5157 // reset the render stream
5158 hr = renderAudioClient->Reset();
5159 if ( FAILED( hr ) ) {
5160 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5164 // start the render stream
5165 hr = renderAudioClient->Start();
5166 if ( FAILED( hr ) ) {
5167 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5172 // malloc buffer memory
// Buffer sizing: convBuffSize must hold one resampled callback buffer (scaled
// by the sample-rate ratio); deviceBuffSize holds one callback buffer at the
// device channel count. DUPLEX takes the max of the input/output requirements.
5173 if ( stream_.mode == INPUT )
5175 using namespace std; // for ceilf
5176 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5177 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5179 else if ( stream_.mode == OUTPUT )
5181 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5182 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5184 else if ( stream_.mode == DUPLEX )
5186 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5187 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5188 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5189 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5192 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5193 convBuffer = ( char* ) malloc( convBuffSize );
5194 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5195 if ( !convBuffer || !stream_.deviceBuffer ) {
5196 errorType = RtAudioError::MEMORY_ERROR;
5197 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5201 // stream process loop
5202 while ( stream_.state != STREAM_STOPPING ) {
5203 if ( !callbackPulled ) {
5206 // 1. Pull callback buffer from inputBuffer
5207 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5208 // Convert callback buffer to user format
5210 if ( captureAudioClient )
// Pull slightly fewer frames than needed when resampling, then top up one
// frame at a time until a full user buffer has been accumulated.
5212 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5213 if ( captureSrRatio != 1 )
5215 // account for remainders
5220 while ( convBufferSize < stream_.bufferSize )
5222 // Pull callback buffer from inputBuffer
5223 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5224 samplesToPull * stream_.nDeviceChannels[INPUT],
5225 stream_.deviceFormat[INPUT] );
5227 if ( !callbackPulled )
5232 // Convert callback buffer to user sample rate
5233 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5234 unsigned int convSamples = 0;
5236 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5241 convBufferSize += convSamples;
5242 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5245 if ( callbackPulled )
5247 if ( stream_.doConvertBuffer[INPUT] ) {
5248 // Convert callback buffer to user format
5249 convertBuffer( stream_.userBuffer[INPUT],
5250 stream_.deviceBuffer,
5251 stream_.convertInfo[INPUT] );
5254 // no further conversion, simple copy deviceBuffer to userBuffer
5255 memcpy( stream_.userBuffer[INPUT],
5256 stream_.deviceBuffer,
5257 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5262 // if there is no capture stream, set callbackPulled flag
5263 callbackPulled = true;
5268 // 1. Execute user callback method
5269 // 2. Handle return value from callback
5271 // if callback has not requested the stream to stop
5272 if ( callbackPulled && !callbackStopped ) {
5273 // Execute user callback method
5274 callbackResult = callback( stream_.userBuffer[OUTPUT],
5275 stream_.userBuffer[INPUT],
5278 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5279 stream_.callbackInfo.userData );
5281 // Handle return value from callback
// Return code 1 = stop stream after draining; 2 = abort immediately. Both
// must be serviced on a separate thread because stop/abort join this one.
5282 if ( callbackResult == 1 ) {
5283 // instantiate a thread to stop this thread
5284 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5285 if ( !threadHandle ) {
5286 errorType = RtAudioError::THREAD_ERROR;
5287 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5290 else if ( !CloseHandle( threadHandle ) ) {
5291 errorType = RtAudioError::THREAD_ERROR;
5292 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5296 callbackStopped = true;
5298 else if ( callbackResult == 2 ) {
5299 // instantiate a thread to stop this thread
5300 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5301 if ( !threadHandle ) {
5302 errorType = RtAudioError::THREAD_ERROR;
5303 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5306 else if ( !CloseHandle( threadHandle ) ) {
5307 errorType = RtAudioError::THREAD_ERROR;
5308 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5312 callbackStopped = true;
5319 // 1. Convert callback buffer to stream format
5320 // 2. Convert callback buffer to stream sample rate and channel count
5321 // 3. Push callback buffer into outputBuffer
5323 if ( renderAudioClient && callbackPulled )
5325 // if the last call to renderBuffer.PushBuffer() was successful
5326 if ( callbackPushed || convBufferSize == 0 )
5328 if ( stream_.doConvertBuffer[OUTPUT] )
5330 // Convert callback buffer to stream format
5331 convertBuffer( stream_.deviceBuffer,
5332 stream_.userBuffer[OUTPUT],
5333 stream_.convertInfo[OUTPUT] );
5337 // no further conversion, simple copy userBuffer to deviceBuffer
5338 memcpy( stream_.deviceBuffer,
5339 stream_.userBuffer[OUTPUT],
5340 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5343 // Convert callback buffer to stream sample rate
5344 renderResampler->Convert( convBuffer,
5345 stream_.deviceBuffer,
5350 // Push callback buffer into outputBuffer
5351 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5352 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5353 stream_.deviceFormat[OUTPUT] );
5356 // if there is no render stream, set callbackPushed flag
5357 callbackPushed = true;
5362 // 1. Get capture buffer from stream
5363 // 2. Push capture buffer into inputBuffer
5364 // 3. If 2. was successful: Release capture buffer
5366 if ( captureAudioClient ) {
5367 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// In loopback mode the capture event is never configured, so the render
// event is used as the pacing signal instead.
5368 if ( !callbackPulled ) {
5369 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5372 // Get capture buffer from stream
5373 hr = captureClient->GetBuffer( &streamBuffer,
5375 &captureFlags, NULL, NULL );
5376 if ( FAILED( hr ) ) {
5377 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5381 if ( bufferFrameCount != 0 ) {
5382 // Push capture buffer into inputBuffer
5383 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5384 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5385 stream_.deviceFormat[INPUT] ) )
5387 // Release capture buffer
5388 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5389 if ( FAILED( hr ) ) {
5390 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5396 // Inform WASAPI that capture was unsuccessful
5397 hr = captureClient->ReleaseBuffer( 0 );
5398 if ( FAILED( hr ) ) {
5399 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5406 // Inform WASAPI that capture was unsuccessful
5407 hr = captureClient->ReleaseBuffer( 0 );
5408 if ( FAILED( hr ) ) {
5409 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5417 // 1. Get render buffer from stream
5418 // 2. Pull next buffer from outputBuffer
5419 // 3. If 2. was successful: Fill render buffer with next buffer
5420 // Release render buffer
5422 if ( renderAudioClient ) {
5423 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5424 if ( callbackPulled && !callbackPushed ) {
5425 WaitForSingleObject( renderEvent, INFINITE );
5428 // Get render buffer from stream
5429 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5430 if ( FAILED( hr ) ) {
5431 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5435 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5436 if ( FAILED( hr ) ) {
5437 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable space = total buffer size minus frames already queued (padding).
5441 bufferFrameCount -= numFramesPadding;
5443 if ( bufferFrameCount != 0 ) {
5444 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5445 if ( FAILED( hr ) ) {
5446 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5450 // Pull next buffer from outputBuffer
5451 // Fill render buffer with next buffer
5452 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5453 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5454 stream_.deviceFormat[OUTPUT] ) )
5456 // Release render buffer
5457 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5458 if ( FAILED( hr ) ) {
5459 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5465 // Inform WASAPI that render was unsuccessful
5466 hr = renderClient->ReleaseBuffer( 0, 0 );
5467 if ( FAILED( hr ) ) {
5468 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5475 // Inform WASAPI that render was unsuccessful
5476 hr = renderClient->ReleaseBuffer( 0, 0 );
5477 if ( FAILED( hr ) ) {
5478 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5484 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5485 if ( callbackPushed ) {
5486 // unsetting the callbackPulled flag lets the stream know that
5487 // the audio device is ready for another callback output buffer.
5488 callbackPulled = false;
5491 RtApi::tickStreamTime();
// Thread-local cleanup: COM-allocated mix formats, conversion buffer, and
// the two resamplers created above.
5498 CoTaskMemFree( captureFormat );
5499 CoTaskMemFree( renderFormat );
5501 free ( convBuffer );
5502 delete renderResampler;
5503 delete captureResampler;
5507 // update stream state
5508 stream_.state = STREAM_STOPPED;
5510 if ( !errorText.empty() )
5512 errorText_ = errorText;
5517 //******************** End of __WINDOWS_WASAPI__ *********************//
5521 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5523 // Modified by Robin Davies, October 2005
5524 // - Improvements to DirectX pointer chasing.
5525 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5526 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5527 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5528 // Changed device query structure for RtAudio 4.0.7, January 2010
5530 #include <windows.h>
5531 #include <process.h>
5532 #include <mmsystem.h>
5536 #include <algorithm>
5538 #if defined(__MINGW32__)
5539 // missing from latest mingw winapi
5540 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5541 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5542 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5543 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5546 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5548 #ifdef _MSC_VER // if Microsoft Visual C++
5549 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5552 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5554 if ( pointer > bufferSize ) pointer -= bufferSize;
5555 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5556 if ( pointer < earlierPointer ) pointer += bufferSize;
5557 return pointer >= earlierPointer && pointer < laterPointer;
5560 // A structure to hold various information related to the DirectSound
5561 // API implementation.
// NOTE(review): the struct header (presumably `struct DsHandle {`) and several
// member declarations (id, buffer, xrun arrays used by the constructor below)
// are elided in this view of the file.
5563 unsigned int drainCounter; // Tracks callback counts when draining
5564 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction state, indexed [0] = playback, [1] = capture (the convention
// implied by the paired id/buffer/xrun initializations in the constructor).
5568 UINT bufferPointer[2];
5569 DWORD dsBufferSize[2];
5570 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Constructor initializer: zero the counters and both directions' handles.
5574 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5577 // Declarations for utility functions, callbacks, and structures
5578 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSound(Capture)Enumerate; remaining
// parameters of the declaration are elided in this view.
5579 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5580 LPCTSTR description,
// Maps a DirectSound HRESULT-style error code to a printable string.
5584 static const char* getErrorString( int code );
// _beginthreadex-compatible entry point for the DirectSound callback thread.
5586 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice constructor initializer fragment: device starts unfound with
// neither a valid output [0] nor input [1] GUID.
5595 : found(false) { validId[0] = false; validId[1] = false; }
// Bundle passed through the enumeration callback: direction flag plus the
// device list to populate.
5598 struct DsProbeData {
5600 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread. coInitialized_ records whether
// the matching CoUninitialize() in the destructor should be issued.
5603 RtApiDs :: RtApiDs()
5605 // Dsound will run both-threaded. If CoInitialize fails, then just
5606 // accept whatever the mainline chose for a threading model.
5607 coInitialized_ = false;
5608 HRESULT hr = CoInitialize( NULL );
5609 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any open stream, then balance the constructor's
// CoInitialize() if it succeeded.
5612 RtApiDs :: ~RtApiDs()
5614 if ( stream_.state != STREAM_CLOSED ) closeStream();
5615 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5618 // The DirectSound default output is always the first device.
// NOTE(review): the function body (presumably `return 0;`) is elided in this
// view of the file.
5619 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5624 // The DirectSound default input is always the first input device,
5625 // which is the first capture device enumerated.
// NOTE(review): the function body (presumably `return 0;`) is elided in this
// view of the file.
5626 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerate all DirectSound output and capture devices into dsDevices,
// prune entries that have disappeared since the last query, and return the
// resulting device count. Enumeration failures are reported as WARNINGs and
// do not abort the query.
5631 unsigned int RtApiDs :: getDeviceCount( void )
5633 // Set query flag for previously found devices to false, so that we
5634 // can check for any devices that have disappeared.
5635 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5636 dsDevices[i].found = false;
5638 // Query DirectSound devices.
5639 struct DsProbeData probeInfo;
5640 probeInfo.isInput = false;
5641 probeInfo.dsDevices = &dsDevices;
// deviceQueryCallback marks found devices / appends new ones via probeInfo.
5642 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5643 if ( FAILED( result ) ) {
5644 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5645 errorText_ = errorStream_.str();
5646 error( RtAudioError::WARNING );
5649 // Query DirectSoundCapture devices.
5650 probeInfo.isInput = true;
5651 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5652 if ( FAILED( result ) ) {
5653 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5654 errorText_ = errorStream_.str();
5655 error( RtAudioError::WARNING );
5658 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// NOTE: the loop header deliberately has no increment -- the index is only
// advanced when no erase occurs (increment elided in this view).
5659 for ( unsigned int i=0; i<dsDevices.size(); ) {
5660 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5664 return static_cast<unsigned int>(dsDevices.size());
// Build an RtAudio::DeviceInfo for dsDevices[device]: probe output
// capabilities via DirectSoundCreate/GetCaps, then input capabilities via
// DirectSoundCaptureCreate/GetCaps, merging supported sample rates and native
// formats. Probe failures are reported as WARNINGs and yield a partially
// filled (or unprobed) info structure.
// NOTE(review): several control-flow lines (the `probeInput:` label targeted
// by the goto below, early returns, closing braces) are elided in this view.
5667 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5669 RtAudio::DeviceInfo info;
5670 info.probed = false;
5672 if ( dsDevices.size() == 0 ) {
5673 // Force a query of all devices
5675 if ( dsDevices.size() == 0 ) {
5676 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5677 error( RtAudioError::INVALID_USE );
5682 if ( device >= dsDevices.size() ) {
5683 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5684 error( RtAudioError::INVALID_USE );
// Skip the output probe entirely when the device has no valid output GUID.
5689 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5691 LPDIRECTSOUND output;
5693 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5694 if ( FAILED( result ) ) {
5695 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5696 errorText_ = errorStream_.str();
5697 error( RtAudioError::WARNING );
5701 outCaps.dwSize = sizeof( outCaps );
5702 result = output->GetCaps( &outCaps );
5703 if ( FAILED( result ) ) {
5705 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5706 errorText_ = errorStream_.str();
5707 error( RtAudioError::WARNING );
5711 // Get output channel information.
5712 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5714 // Get sample rate information.
5715 info.sampleRates.clear();
5716 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5717 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5718 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5719 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate that does not exceed 48 kHz.
5721 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5722 info.preferredSampleRate = SAMPLE_RATES[k];
5726 // Get format information.
5727 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5728 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5732 if ( getDefaultOutputDevice() == device )
5733 info.isDefaultOutput = true;
// Input probe section (the goto above jumps here for output-less devices).
5735 if ( dsDevices[ device ].validId[1] == false ) {
5736 info.name = dsDevices[ device ].name;
5743 LPDIRECTSOUNDCAPTURE input;
5744 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5745 if ( FAILED( result ) ) {
5746 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5747 errorText_ = errorStream_.str();
5748 error( RtAudioError::WARNING );
5753 inCaps.dwSize = sizeof( inCaps );
5754 result = input->GetCaps( &inCaps );
5755 if ( FAILED( result ) ) {
5757 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5758 errorText_ = errorStream_.str();
5759 error( RtAudioError::WARNING );
5763 // Get input channel information.
5764 info.inputChannels = inCaps.dwChannels;
5766 // Get sample rate and format information.
// The WAVE_FORMAT_* bits encode rate (1x/2x/4x/96) x channels (M/S) x width
// (08/16); stereo and mono device caps are decoded by separate branches.
5767 std::vector<unsigned int> rates;
5768 if ( inCaps.dwChannels >= 2 ) {
5769 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5770 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5771 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5772 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5773 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5774 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5775 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5776 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5778 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5779 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5780 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5781 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5782 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5784 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5785 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5786 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5787 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5788 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5791 else if ( inCaps.dwChannels == 1 ) {
5792 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5793 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5794 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5795 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5796 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5797 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5798 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5799 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5801 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5802 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5803 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5804 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5805 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5807 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5808 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5809 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5810 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5811 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5814 else info.inputChannels = 0; // technically, this would be an error
5818 if ( info.inputChannels == 0 ) return info;
5820 // Copy the supported rates to the info structure but avoid duplication.
5822 for ( unsigned int i=0; i<rates.size(); i++ ) {
5824 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5825 if ( rates[i] == info.sampleRates[j] ) {
5830 if ( found == false ) info.sampleRates.push_back( rates[i] );
5832 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5834 // If device opens for both playback and capture, we determine the channels.
5835 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5836 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is DirectSound's default input (see getDefaultInputDevice above).
5838 if ( device == 0 ) info.isDefaultInput = true;
5840 // Copy name and return.
5841 info.name = dsDevices[ device ].name;
// Open the given DirectSound device for the requested mode (OUTPUT or
// INPUT), sample rate, format and buffer size.  On success, fills in the
// relevant stream_ fields, allocates user/device conversion buffers, the
// DsHandle structure and (on first call) the callback thread; returns
// FAILURE with errorText_ set otherwise.  *bufferSize may be adjusted
// (clamped to a minimum of 32 frames).  Note: fixes two error messages
// that previously mis-reported their origin as getDeviceInfo, and the
// bad_alloc message that said "AsioHandle" (copy-paste from the ASIO
// backend) instead of "DsHandle".
5846 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5847 unsigned int firstChannel, unsigned int sampleRate,
5848 RtAudioFormat format, unsigned int *bufferSize,
5849 RtAudio::StreamOptions *options )
// DirectSound supports at most stereo per device.
5851 if ( channels + firstChannel > 2 ) {
5852 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5856 size_t nDevices = dsDevices.size();
5857 if ( nDevices == 0 ) {
5858 // This should not happen because a check is made before this function is called.
5859 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5863 if ( device >= nDevices ) {
5864 // This should not happen because a check is made before this function is called.
5865 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] marks output capability, validId[1] input capability.
5869 if ( mode == OUTPUT ) {
5870 if ( dsDevices[ device ].validId[0] == false ) {
5871 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5872 errorText_ = errorStream_.str();
5876 else { // mode == INPUT
5877 if ( dsDevices[ device ].validId[1] == false ) {
5878 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5879 errorText_ = errorStream_.str();
5884 // According to a note in PortAudio, using GetDesktopWindow()
5885 // instead of GetForegroundWindow() is supposed to avoid problems
5886 // that occur when the application's window is not the foreground
5887 // window. Also, if the application window closes before the
5888 // DirectSound buffer, DirectSound can crash. In the past, I had
5889 // problems when using GetDesktopWindow() but it seems fine now
5890 // (January 2010). I'll leave it commented here.
5891 // HWND hWnd = GetForegroundWindow();
5892 HWND hWnd = GetDesktopWindow();
5894 // Check the numberOfBuffers parameter and limit the lowest value to
5895 // two. This is a judgement call and a value of two is probably too
5896 // low for capture, but it should work for playback.
5898 if ( options ) nBuffers = options->numberOfBuffers;
5899 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5900 if ( nBuffers < 2 ) nBuffers = 3;
5902 // Check the lower range of the user-specified buffer size and set
5903 // (arbitrarily) to a lower bound of 32.
5904 if ( *bufferSize < 32 ) *bufferSize = 32;
5906 // Create the wave format structure. The data format setting will
5907 // be determined later.
5908 WAVEFORMATEX waveFormat;
5909 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5910 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5911 waveFormat.nChannels = channels + firstChannel;
5912 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5914 // Determine the device buffer size. By default, we'll use the value
5915 // defined above (32K), but we will grow it to make allowances for
5916 // very large software buffer sizes.
5917 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5918 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle carry the DirectSound object and buffer pointers
// into the DsHandle at the end of this function.
5920 void *ohandle = 0, *bhandle = 0;
5922 if ( mode == OUTPUT ) {
5924 LPDIRECTSOUND output;
5925 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5926 if ( FAILED( result ) ) {
5927 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5928 errorText_ = errorStream_.str();
5933 outCaps.dwSize = sizeof( outCaps );
5934 result = output->GetCaps( &outCaps );
5935 if ( FAILED( result ) ) {
5937 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5938 errorText_ = errorStream_.str();
5942 // Check channel information.
5943 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5944 errorStream_ << "RtApiDs::probeDeviceOpen: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5945 errorText_ = errorStream_.str();
5949 // Check format information. Use 16-bit format unless not
5950 // supported or user requests 8-bit.
5951 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5952 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5953 waveFormat.wBitsPerSample = 16;
5954 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5957 waveFormat.wBitsPerSample = 8;
5958 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5960 stream_.userFormat = format;
5962 // Update wave format structure and buffer information.
5963 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5964 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5965 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5967 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5968 while ( dsPointerLeadTime * 2U > dsBufferSize )
5971 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5972 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5973 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5974 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5975 if ( FAILED( result ) ) {
5977 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5978 errorText_ = errorStream_.str();
5982 // Even though we will write to the secondary buffer, we need to
5983 // access the primary buffer to set the correct output format
5984 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5985 // buffer description.
5986 DSBUFFERDESC bufferDescription;
5987 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5988 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5989 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5991 // Obtain the primary buffer
5992 LPDIRECTSOUNDBUFFER buffer;
5993 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5994 if ( FAILED( result ) ) {
5996 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5997 errorText_ = errorStream_.str();
6001 // Set the primary DS buffer sound format.
6002 result = buffer->SetFormat( &waveFormat );
6003 if ( FAILED( result ) ) {
6005 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
6006 errorText_ = errorStream_.str();
6010 // Setup the secondary DS buffer description.
6011 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
6012 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
6013 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6014 DSBCAPS_GLOBALFOCUS |
6015 DSBCAPS_GETCURRENTPOSITION2 |
6016 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6017 bufferDescription.dwBufferBytes = dsBufferSize;
6018 bufferDescription.lpwfxFormat = &waveFormat;
6020 // Try to create the secondary DS buffer. If that doesn't work,
6021 // try to use software mixing. Otherwise, there's a problem.
6022 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6023 if ( FAILED( result ) ) {
6024 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6025 DSBCAPS_GLOBALFOCUS |
6026 DSBCAPS_GETCURRENTPOSITION2 |
6027 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6028 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6029 if ( FAILED( result ) ) {
6031 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6032 errorText_ = errorStream_.str();
6037 // Get the buffer size ... might be different from what we specified.
6039 dsbcaps.dwSize = sizeof( DSBCAPS );
6040 result = buffer->GetCaps( &dsbcaps );
6041 if ( FAILED( result ) ) {
6044 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6045 errorText_ = errorStream_.str();
6049 dsBufferSize = dsbcaps.dwBufferBytes;
6051 // Lock the DS buffer
6054 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6055 if ( FAILED( result ) ) {
6058 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6059 errorText_ = errorStream_.str();
6063 // Zero the DS buffer
6064 ZeroMemory( audioPtr, dataLen );
6066 // Unlock the DS buffer
6067 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6068 if ( FAILED( result ) ) {
6071 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6072 errorText_ = errorStream_.str();
6076 ohandle = (void *) output;
6077 bhandle = (void *) buffer;
6080 if ( mode == INPUT ) {
6082 LPDIRECTSOUNDCAPTURE input;
6083 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6084 if ( FAILED( result ) ) {
6085 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6086 errorText_ = errorStream_.str();
6091 inCaps.dwSize = sizeof( inCaps );
6092 result = input->GetCaps( &inCaps );
6093 if ( FAILED( result ) ) {
6095 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6096 errorText_ = errorStream_.str();
6100 // Check channel information.
6101 if ( inCaps.dwChannels < channels + firstChannel ) {
6102 errorText_ = "RtApiDs::probeDeviceOpen: the input device does not support requested input channels.";
6106 // Check format information. Use 16-bit format unless user
6108 DWORD deviceFormats;
6109 if ( channels + firstChannel == 2 ) {
6110 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6111 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6112 waveFormat.wBitsPerSample = 8;
6113 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6115 else { // assume 16-bit is supported
6116 waveFormat.wBitsPerSample = 16;
6117 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6120 else { // channel == 1
6121 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6122 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6123 waveFormat.wBitsPerSample = 8;
6124 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6126 else { // assume 16-bit is supported
6127 waveFormat.wBitsPerSample = 16;
6128 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6131 stream_.userFormat = format;
6133 // Update wave format structure and buffer information.
6134 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6135 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6136 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6138 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6139 while ( dsPointerLeadTime * 2U > dsBufferSize )
6142 // Setup the secondary DS buffer description.
6143 DSCBUFFERDESC bufferDescription;
6144 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6145 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6146 bufferDescription.dwFlags = 0;
6147 bufferDescription.dwReserved = 0;
6148 bufferDescription.dwBufferBytes = dsBufferSize;
6149 bufferDescription.lpwfxFormat = &waveFormat;
6151 // Create the capture buffer.
6152 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6153 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6154 if ( FAILED( result ) ) {
6156 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6157 errorText_ = errorStream_.str();
6161 // Get the buffer size ... might be different from what we specified.
6163 dscbcaps.dwSize = sizeof( DSCBCAPS );
6164 result = buffer->GetCaps( &dscbcaps );
6165 if ( FAILED( result ) ) {
6168 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6169 errorText_ = errorStream_.str();
6173 dsBufferSize = dscbcaps.dwBufferBytes;
6175 // NOTE: We could have a problem here if this is a duplex stream
6176 // and the play and capture hardware buffer sizes are different
6177 // (I'm actually not sure if that is a problem or not).
6178 // Currently, we are not verifying that.
6180 // Lock the capture buffer
6183 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6184 if ( FAILED( result ) ) {
6187 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6188 errorText_ = errorStream_.str();
6193 ZeroMemory( audioPtr, dataLen );
6195 // Unlock the buffer
6196 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6197 if ( FAILED( result ) ) {
6200 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6201 errorText_ = errorStream_.str();
6205 ohandle = (void *) input;
6206 bhandle = (void *) buffer;
6209 // Set various stream parameters
6210 DsHandle *handle = 0;
6211 stream_.nDeviceChannels[mode] = channels + firstChannel;
6212 stream_.nUserChannels[mode] = channels;
6213 stream_.bufferSize = *bufferSize;
6214 stream_.channelOffset[mode] = firstChannel;
6215 stream_.deviceInterleaved[mode] = true;
6216 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6217 else stream_.userInterleaved = true;
6219 // Set flag for buffer conversion
6220 stream_.doConvertBuffer[mode] = false;
6221 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6222 stream_.doConvertBuffer[mode] = true;
6223 if (stream_.userFormat != stream_.deviceFormat[mode])
6224 stream_.doConvertBuffer[mode] = true;
6225 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6226 stream_.nUserChannels[mode] > 1 )
6227 stream_.doConvertBuffer[mode] = true;
6229 // Allocate necessary internal buffers
6230 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6231 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6232 if ( stream_.userBuffer[mode] == NULL ) {
6233 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6237 if ( stream_.doConvertBuffer[mode] ) {
6239 bool makeBuffer = true;
6240 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6241 if ( mode == INPUT ) {
// Reuse an existing (output) device buffer if it is already big enough.
6242 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6243 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6244 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6249 bufferBytes *= *bufferSize;
6250 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6251 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6252 if ( stream_.deviceBuffer == NULL ) {
6253 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6259 // Allocate our DsHandle structures for the stream.
6260 if ( stream_.apiHandle == 0 ) {
6262 handle = new DsHandle;
6264 catch ( std::bad_alloc& ) {
6265 errorText_ = "RtApiDs::probeDeviceOpen: error allocating DsHandle memory.";
6269 // Create a manual-reset event.
6270 handle->condition = CreateEvent( NULL, // no security
6271 TRUE, // manual-reset
6272 FALSE, // non-signaled initially
6274 stream_.apiHandle = (void *) handle;
6277 handle = (DsHandle *) stream_.apiHandle;
6278 handle->id[mode] = ohandle;
6279 handle->buffer[mode] = bhandle;
6280 handle->dsBufferSize[mode] = dsBufferSize;
6281 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6283 stream_.device[mode] = device;
6284 stream_.state = STREAM_STOPPED;
6285 if ( stream_.mode == OUTPUT && mode == INPUT )
6286 // We had already set up an output stream.
6287 stream_.mode = DUPLEX;
6289 stream_.mode = mode;
6290 stream_.nBuffers = nBuffers;
6291 stream_.sampleRate = sampleRate;
6293 // Setup the buffer conversion information structure.
6294 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6296 // Setup the callback thread.
6297 if ( stream_.callbackInfo.isRunning == false ) {
6299 stream_.callbackInfo.isRunning = true;
6300 stream_.callbackInfo.object = (void *) this;
6301 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6302 &stream_.callbackInfo, 0, &threadId );
6303 if ( stream_.callbackInfo.thread == 0 ) {
6304 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6308 // Boost DS thread priority
6309 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Error path: release any DirectSound objects and free buffers that
// were allocated before the failure, then mark the stream closed.
6315 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6316 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6317 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6318 if ( buffer ) buffer->Release();
6321 if ( handle->buffer[1] ) {
6322 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6323 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6324 if ( buffer ) buffer->Release();
6327 CloseHandle( handle->condition );
6329 stream_.apiHandle = 0;
6332 for ( int i=0; i<2; i++ ) {
6333 if ( stream_.userBuffer[i] ) {
6334 free( stream_.userBuffer[i] );
6335 stream_.userBuffer[i] = 0;
6339 if ( stream_.deviceBuffer ) {
6340 free( stream_.deviceBuffer );
6341 stream_.deviceBuffer = 0;
6344 stream_.state = STREAM_CLOSED;
// Close an open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and the event handle, free the
// internal user and device buffers, and reset the stream mode/state.
// Issues only a WARNING (and returns) if no stream is open.
6348 void RtApiDs :: closeStream()
6350 if ( stream_.state == STREAM_CLOSED ) {
6351 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6352 error( RtAudioError::WARNING );
6356 // Stop the callback thread.
6357 stream_.callbackInfo.isRunning = false;
6358 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6359 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6361 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release the playback-side DirectSound objects (buffer[0]/id[0]).
6363 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6364 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6365 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release the capture-side DirectSound objects (buffer[1]/id[1]).
6372 if ( handle->buffer[1] ) {
6373 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6374 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6381 CloseHandle( handle->condition );
6383 stream_.apiHandle = 0;
// Free the per-direction user buffers allocated in probeDeviceOpen.
6386 for ( int i=0; i<2; i++ ) {
6387 if ( stream_.userBuffer[i] ) {
6388 free( stream_.userBuffer[i] );
6389 stream_.userBuffer[i] = 0;
// Free the shared device (conversion) buffer, if one was allocated.
6393 if ( stream_.deviceBuffer ) {
6394 free( stream_.deviceBuffer );
6395 stream_.deviceBuffer = 0;
6398 stream_.mode = UNINITIALIZED;
6399 stream_.state = STREAM_CLOSED;
// Start a stopped stream: begin looping playback and/or capture on the
// DirectSound buffers, reset the drain bookkeeping, and mark the stream
// RUNNING.  Issues only a WARNING (and returns) if already running.
6402 void RtApiDs :: startStream()
6405 if ( stream_.state == STREAM_RUNNING ) {
6406 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6407 error( RtAudioError::WARNING );
// Record the stream start time for getStreamTime() bookkeeping.
6411 #if defined( HAVE_GETTIMEOFDAY )
6412 gettimeofday( &stream_.lastTickTimestamp, NULL );
6415 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6417 // Increase scheduler frequency on lesser windows (a side-effect of
6418 // increasing timer accuracy). On greater windows (Win2K or later),
6419 // this is already in effect.
6420 timeBeginPeriod( 1 );
// buffersRolling/duplexPrerollBytes are re-derived on every start; the
// callback uses them to synchronize the play and capture pointers.
6422 buffersRolling = false;
6423 duplexPrerollBytes = 0;
6425 if ( stream_.mode == DUPLEX ) {
6426 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6427 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looping playback on the output buffer, if any.
6431 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6433 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6434 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6435 if ( FAILED( result ) ) {
6436 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6437 errorText_ = errorStream_.str();
// Start looping capture on the input buffer, if any.
6442 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6444 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6445 result = buffer->Start( DSCBSTART_LOOPING );
6446 if ( FAILED( result ) ) {
6447 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6448 errorText_ = errorStream_.str();
// Reset drain state and the stop-synchronization event before running.
6453 handle->drainCounter = 0;
6454 handle->internalDrain = false;
6455 ResetEvent( handle->condition );
6456 stream_.state = STREAM_RUNNING;
6459 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream.  For output, optionally waits for the pending
// buffer to drain (drainCounter handshake with callbackEvent via the
// condition event), then stops each DirectSound buffer and zeroes it so
// a later restart does not replay stale audio.  Issues only a WARNING
// (and returns) if already stopped.
6462 void RtApiDs :: stopStream()
6465 if ( stream_.state == STREAM_STOPPED ) {
6466 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6467 error( RtAudioError::WARNING );
6474 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6475 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is in progress; request one (2) and
// wait for the callback to signal completion before stopping.
6476 if ( handle->drainCounter == 0 ) {
6477 handle->drainCounter = 2;
6478 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6481 stream_.state = STREAM_STOPPED;
6483 MUTEX_LOCK( &stream_.mutex );
6485 // Stop the buffer and clear memory
6486 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6487 result = buffer->Stop();
6488 if ( FAILED( result ) ) {
6489 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6490 errorText_ = errorStream_.str();
6494 // Lock the buffer and clear it so that if we start to play again,
6495 // we won't have old data playing.
6496 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6497 if ( FAILED( result ) ) {
6498 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6499 errorText_ = errorStream_.str();
6503 // Zero the DS buffer
6504 ZeroMemory( audioPtr, dataLen );
6506 // Unlock the DS buffer
6507 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6508 if ( FAILED( result ) ) {
6509 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6510 errorText_ = errorStream_.str();
6514 // If we start playing again, we must begin at beginning of buffer.
6515 handle->bufferPointer[0] = 0;
6518 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6519 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6523 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for an input-only stream.
6525 if ( stream_.mode != DUPLEX )
6526 MUTEX_LOCK( &stream_.mutex );
6528 result = buffer->Stop();
6529 if ( FAILED( result ) ) {
6530 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6531 errorText_ = errorStream_.str();
6535 // Lock the buffer and clear it so that if we start to play again,
6536 // we won't have old data playing.
6537 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6538 if ( FAILED( result ) ) {
6539 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6540 errorText_ = errorStream_.str();
6544 // Zero the DS buffer
6545 ZeroMemory( audioPtr, dataLen );
6547 // Unlock the DS buffer
6548 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6549 if ( FAILED( result ) ) {
6550 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6551 errorText_ = errorStream_.str();
6555 // If we start recording again, we must begin at beginning of buffer.
6556 handle->bufferPointer[1] = 0;
6560 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6561 MUTEX_UNLOCK( &stream_.mutex );
6563 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream.  Sets drainCounter to 2 so the callback's
// drain handshake completes immediately instead of waiting for queued
// audio to play out (the remainder of this function is outside the
// visible excerpt — presumably it then delegates to stopStream; verify
// against the full source).  Issues only a WARNING if already stopped.
6566 void RtApiDs :: abortStream()
6569 if ( stream_.state == STREAM_STOPPED ) {
6570 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6571 error( RtAudioError::WARNING );
6575 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6576 handle->drainCounter = 2;
6581 void RtApiDs :: callbackEvent()
6583 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6584 Sleep( 50 ); // sleep 50 milliseconds
6588 if ( stream_.state == STREAM_CLOSED ) {
6589 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6590 error( RtAudioError::WARNING );
6594 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6595 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6597 // Check if we were draining the stream and signal is finished.
6598 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6600 stream_.state = STREAM_STOPPING;
6601 if ( handle->internalDrain == false )
6602 SetEvent( handle->condition );
6608 // Invoke user callback to get fresh output data UNLESS we are
6610 if ( handle->drainCounter == 0 ) {
6611 RtAudioCallback callback = (RtAudioCallback) info->callback;
6612 double streamTime = getStreamTime();
6613 RtAudioStreamStatus status = 0;
6614 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6615 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6616 handle->xrun[0] = false;
6618 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6619 status |= RTAUDIO_INPUT_OVERFLOW;
6620 handle->xrun[1] = false;
6622 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6623 stream_.bufferSize, streamTime, status, info->userData );
6624 if ( cbReturnValue == 2 ) {
6625 stream_.state = STREAM_STOPPING;
6626 handle->drainCounter = 2;
6630 else if ( cbReturnValue == 1 ) {
6631 handle->drainCounter = 1;
6632 handle->internalDrain = true;
6637 DWORD currentWritePointer, safeWritePointer;
6638 DWORD currentReadPointer, safeReadPointer;
6639 UINT nextWritePointer;
6641 LPVOID buffer1 = NULL;
6642 LPVOID buffer2 = NULL;
6643 DWORD bufferSize1 = 0;
6644 DWORD bufferSize2 = 0;
6649 MUTEX_LOCK( &stream_.mutex );
6650 if ( stream_.state == STREAM_STOPPED ) {
6651 MUTEX_UNLOCK( &stream_.mutex );
6655 if ( buffersRolling == false ) {
6656 if ( stream_.mode == DUPLEX ) {
6657 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6659 // It takes a while for the devices to get rolling. As a result,
6660 // there's no guarantee that the capture and write device pointers
6661 // will move in lockstep. Wait here for both devices to start
6662 // rolling, and then set our buffer pointers accordingly.
6663 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6664 // bytes later than the write buffer.
6666 // Stub: a serious risk of having a pre-emptive scheduling round
6667 // take place between the two GetCurrentPosition calls... but I'm
6668 // really not sure how to solve the problem. Temporarily boost to
6669 // Realtime priority, maybe; but I'm not sure what priority the
6670 // DirectSound service threads run at. We *should* be roughly
6671 // within a ms or so of correct.
6673 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6674 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6676 DWORD startSafeWritePointer, startSafeReadPointer;
6678 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6679 if ( FAILED( result ) ) {
6680 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6681 errorText_ = errorStream_.str();
6682 MUTEX_UNLOCK( &stream_.mutex );
6683 error( RtAudioError::SYSTEM_ERROR );
6686 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6687 if ( FAILED( result ) ) {
6688 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6689 errorText_ = errorStream_.str();
6690 MUTEX_UNLOCK( &stream_.mutex );
6691 error( RtAudioError::SYSTEM_ERROR );
6695 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6696 if ( FAILED( result ) ) {
6697 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6698 errorText_ = errorStream_.str();
6699 MUTEX_UNLOCK( &stream_.mutex );
6700 error( RtAudioError::SYSTEM_ERROR );
6703 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6704 if ( FAILED( result ) ) {
6705 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6706 errorText_ = errorStream_.str();
6707 MUTEX_UNLOCK( &stream_.mutex );
6708 error( RtAudioError::SYSTEM_ERROR );
6711 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6715 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6717 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6718 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6719 handle->bufferPointer[1] = safeReadPointer;
6721 else if ( stream_.mode == OUTPUT ) {
6723 // Set the proper nextWritePosition after initial startup.
6724 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6725 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6726 if ( FAILED( result ) ) {
6727 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6728 errorText_ = errorStream_.str();
6729 MUTEX_UNLOCK( &stream_.mutex );
6730 error( RtAudioError::SYSTEM_ERROR );
6733 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6734 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6737 buffersRolling = true;
6740 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6742 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6744 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6745 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6746 bufferBytes *= formatBytes( stream_.userFormat );
6747 memset( stream_.userBuffer[0], 0, bufferBytes );
6750 // Setup parameters and do buffer conversion if necessary.
6751 if ( stream_.doConvertBuffer[0] ) {
6752 buffer = stream_.deviceBuffer;
6753 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6754 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6755 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6758 buffer = stream_.userBuffer[0];
6759 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6760 bufferBytes *= formatBytes( stream_.userFormat );
6763 // No byte swapping necessary in DirectSound implementation.
6765 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6766 // unsigned. So, we need to convert our signed 8-bit data here to
6768 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6769 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6771 DWORD dsBufferSize = handle->dsBufferSize[0];
6772 nextWritePointer = handle->bufferPointer[0];
6774 DWORD endWrite, leadPointer;
6776 // Find out where the read and "safe write" pointers are.
6777 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6778 if ( FAILED( result ) ) {
6779 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6780 errorText_ = errorStream_.str();
6781 MUTEX_UNLOCK( &stream_.mutex );
6782 error( RtAudioError::SYSTEM_ERROR );
6786 // We will copy our output buffer into the region between
6787 // safeWritePointer and leadPointer. If leadPointer is not
6788 // beyond the next endWrite position, wait until it is.
6789 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6790 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6791 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6792 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6793 endWrite = nextWritePointer + bufferBytes;
6795 // Check whether the entire write region is behind the play pointer.
6796 if ( leadPointer >= endWrite ) break;
6798 // If we are here, then we must wait until the leadPointer advances
6799 // beyond the end of our next write region. We use the
6800 // Sleep() function to suspend operation until that happens.
6801 double millis = ( endWrite - leadPointer ) * 1000.0;
6802 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6803 if ( millis < 1.0 ) millis = 1.0;
6804 Sleep( (DWORD) millis );
6807 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6808 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6809 // We've strayed into the forbidden zone ... resync the read pointer.
6810 handle->xrun[0] = true;
6811 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6812 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6813 handle->bufferPointer[0] = nextWritePointer;
6814 endWrite = nextWritePointer + bufferBytes;
6817 // Lock free space in the buffer
6818 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6819 &bufferSize1, &buffer2, &bufferSize2, 0 );
6820 if ( FAILED( result ) ) {
6821 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6822 errorText_ = errorStream_.str();
6823 MUTEX_UNLOCK( &stream_.mutex );
6824 error( RtAudioError::SYSTEM_ERROR );
6828 // Copy our buffer into the DS buffer
6829 CopyMemory( buffer1, buffer, bufferSize1 );
6830 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6832 // Update our buffer offset and unlock sound buffer
6833 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6834 if ( FAILED( result ) ) {
6835 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6836 errorText_ = errorStream_.str();
6837 MUTEX_UNLOCK( &stream_.mutex );
6838 error( RtAudioError::SYSTEM_ERROR );
6841 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6842 handle->bufferPointer[0] = nextWritePointer;
6845 // Don't bother draining input
6846 if ( handle->drainCounter ) {
6847 handle->drainCounter++;
6851 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6853 // Setup parameters.
6854 if ( stream_.doConvertBuffer[1] ) {
6855 buffer = stream_.deviceBuffer;
6856 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6857 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6860 buffer = stream_.userBuffer[1];
6861 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6862 bufferBytes *= formatBytes( stream_.userFormat );
6865 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6866 long nextReadPointer = handle->bufferPointer[1];
6867 DWORD dsBufferSize = handle->dsBufferSize[1];
6869 // Find out where the write and "safe read" pointers are.
6870 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6871 if ( FAILED( result ) ) {
6872 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6873 errorText_ = errorStream_.str();
6874 MUTEX_UNLOCK( &stream_.mutex );
6875 error( RtAudioError::SYSTEM_ERROR );
6879 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6880 DWORD endRead = nextReadPointer + bufferBytes;
6882 // Handling depends on whether we are INPUT or DUPLEX.
6883 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6884 // then a wait here will drag the write pointers into the forbidden zone.
6886 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6887 // it's in a safe position. This causes dropouts, but it seems to be the only
6888 // practical way to sync up the read and write pointers reliably, given the
6889 // the very complex relationship between phase and increment of the read and write
6892 // In order to minimize audible dropouts in DUPLEX mode, we will
6893 // provide a pre-roll period of 0.5 seconds in which we return
6894 // zeros from the read buffer while the pointers sync up.
6896 if ( stream_.mode == DUPLEX ) {
6897 if ( safeReadPointer < endRead ) {
6898 if ( duplexPrerollBytes <= 0 ) {
6899 // Pre-roll time over. Be more agressive.
6900 int adjustment = endRead-safeReadPointer;
6902 handle->xrun[1] = true;
6904 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6905 // and perform fine adjustments later.
6906 // - small adjustments: back off by twice as much.
6907 if ( adjustment >= 2*bufferBytes )
6908 nextReadPointer = safeReadPointer-2*bufferBytes;
6910 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6912 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6916 // In pre=roll time. Just do it.
6917 nextReadPointer = safeReadPointer - bufferBytes;
6918 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6920 endRead = nextReadPointer + bufferBytes;
6923 else { // mode == INPUT
6924 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6925 // See comments for playback.
6926 double millis = (endRead - safeReadPointer) * 1000.0;
6927 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6928 if ( millis < 1.0 ) millis = 1.0;
6929 Sleep( (DWORD) millis );
6931 // Wake up and find out where we are now.
6932 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6933 if ( FAILED( result ) ) {
6934 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6935 errorText_ = errorStream_.str();
6936 MUTEX_UNLOCK( &stream_.mutex );
6937 error( RtAudioError::SYSTEM_ERROR );
6941 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6945 // Lock free space in the buffer
6946 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6947 &bufferSize1, &buffer2, &bufferSize2, 0 );
6948 if ( FAILED( result ) ) {
6949 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6950 errorText_ = errorStream_.str();
6951 MUTEX_UNLOCK( &stream_.mutex );
6952 error( RtAudioError::SYSTEM_ERROR );
6956 if ( duplexPrerollBytes <= 0 ) {
6957 // Copy our buffer into the DS buffer
6958 CopyMemory( buffer, buffer1, bufferSize1 );
6959 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6962 memset( buffer, 0, bufferSize1 );
6963 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6964 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6967 // Update our buffer offset and unlock sound buffer
6968 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6969 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6970 if ( FAILED( result ) ) {
6971 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6972 errorText_ = errorStream_.str();
6973 MUTEX_UNLOCK( &stream_.mutex );
6974 error( RtAudioError::SYSTEM_ERROR );
6977 handle->bufferPointer[1] = nextReadPointer;
6979 // No byte swapping necessary in DirectSound implementation.
6981 // If necessary, convert 8-bit data from unsigned to signed.
6982 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6983 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6985 // Do buffer conversion if necessary.
6986 if ( stream_.doConvertBuffer[1] )
6987 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6991 MUTEX_UNLOCK( &stream_.mutex );
6992 RtApi::tickStreamTime();
6995 // Definitions for utility functions and callbacks
6996 // specific to the DirectSound implementation.
6998 static unsigned __stdcall callbackHandler( void *ptr )
7000 CallbackInfo *info = (CallbackInfo *) ptr;
7001 RtApiDs *object = (RtApiDs *) info->object;
7002 bool* isRunning = &info->isRunning;
7004 while ( *isRunning == true ) {
7005 object->callbackEvent();
7012 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
7013 LPCTSTR description,
7017 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
7018 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7021 bool validDevice = false;
7022 if ( probeInfo.isInput == true ) {
7024 LPDIRECTSOUNDCAPTURE object;
7026 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7027 if ( hr != DS_OK ) return TRUE;
7029 caps.dwSize = sizeof(caps);
7030 hr = object->GetCaps( &caps );
7031 if ( hr == DS_OK ) {
7032 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7039 LPDIRECTSOUND object;
7040 hr = DirectSoundCreate( lpguid, &object, NULL );
7041 if ( hr != DS_OK ) return TRUE;
7043 caps.dwSize = sizeof(caps);
7044 hr = object->GetCaps( &caps );
7045 if ( hr == DS_OK ) {
7046 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7052 // If good device, then save its name and guid.
7053 std::string name = convertCharPointerToStdString( description );
7054 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7055 if ( lpguid == NULL )
7056 name = "Default Device";
7057 if ( validDevice ) {
7058 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7059 if ( dsDevices[i].name == name ) {
7060 dsDevices[i].found = true;
7061 if ( probeInfo.isInput ) {
7062 dsDevices[i].id[1] = lpguid;
7063 dsDevices[i].validId[1] = true;
7066 dsDevices[i].id[0] = lpguid;
7067 dsDevices[i].validId[0] = true;
7075 device.found = true;
7076 if ( probeInfo.isInput ) {
7077 device.id[1] = lpguid;
7078 device.validId[1] = true;
7081 device.id[0] = lpguid;
7082 device.validId[0] = true;
7084 dsDevices.push_back( device );
7090 static const char* getErrorString( int code )
7094 case DSERR_ALLOCATED:
7095 return "Already allocated";
7097 case DSERR_CONTROLUNAVAIL:
7098 return "Control unavailable";
7100 case DSERR_INVALIDPARAM:
7101 return "Invalid parameter";
7103 case DSERR_INVALIDCALL:
7104 return "Invalid call";
7107 return "Generic error";
7109 case DSERR_PRIOLEVELNEEDED:
7110 return "Priority level needed";
7112 case DSERR_OUTOFMEMORY:
7113 return "Out of memory";
7115 case DSERR_BADFORMAT:
7116 return "The sample rate or the channel format is not supported";
7118 case DSERR_UNSUPPORTED:
7119 return "Not supported";
7121 case DSERR_NODRIVER:
7124 case DSERR_ALREADYINITIALIZED:
7125 return "Already initialized";
7127 case DSERR_NOAGGREGATION:
7128 return "No aggregation";
7130 case DSERR_BUFFERLOST:
7131 return "Buffer lost";
7133 case DSERR_OTHERAPPHASPRIO:
7134 return "Another application already has priority";
7136 case DSERR_UNINITIALIZED:
7137 return "Uninitialized";
7140 return "DirectSound unknown error";
7143 //******************** End of __WINDOWS_DS__ *********************//
7147 #if defined(__LINUX_ALSA__)
7149 #include <alsa/asoundlib.h>
7152 // A structure to hold various information related to the ALSA API
7155 snd_pcm_t *handles[2];
7158 pthread_cond_t runnable_cv;
7162 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7165 static void *alsaCallbackHandler( void * ptr );
7167 RtApiAlsa :: RtApiAlsa()
7169 // Nothing to do here.
7172 RtApiAlsa :: ~RtApiAlsa()
7174 if ( stream_.state != STREAM_CLOSED ) closeStream();
7177 unsigned int RtApiAlsa :: getDeviceCount( void )
7179 unsigned nDevices = 0;
7180 int result, subdevice, card;
7184 // Count cards and devices
7186 snd_card_next( &card );
7187 while ( card >= 0 ) {
7188 sprintf( name, "hw:%d", card );
7189 result = snd_ctl_open( &handle, name, 0 );
7191 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7192 errorText_ = errorStream_.str();
7193 error( RtAudioError::WARNING );
7198 result = snd_ctl_pcm_next_device( handle, &subdevice );
7200 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7201 errorText_ = errorStream_.str();
7202 error( RtAudioError::WARNING );
7205 if ( subdevice < 0 )
7210 snd_ctl_close( handle );
7211 snd_card_next( &card );
7214 result = snd_ctl_open( &handle, "default", 0 );
7217 snd_ctl_close( handle );
7223 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7225 RtAudio::DeviceInfo info;
7226 info.probed = false;
7228 unsigned nDevices = 0;
7229 int result, subdevice, card;
7233 // Count cards and devices
7236 snd_card_next( &card );
7237 while ( card >= 0 ) {
7238 sprintf( name, "hw:%d", card );
7239 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7241 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7242 errorText_ = errorStream_.str();
7243 error( RtAudioError::WARNING );
7248 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7250 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7251 errorText_ = errorStream_.str();
7252 error( RtAudioError::WARNING );
7255 if ( subdevice < 0 ) break;
7256 if ( nDevices == device ) {
7257 sprintf( name, "hw:%d,%d", card, subdevice );
7263 snd_ctl_close( chandle );
7264 snd_card_next( &card );
7267 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7268 if ( result == 0 ) {
7269 if ( nDevices == device ) {
7270 strcpy( name, "default" );
7276 if ( nDevices == 0 ) {
7277 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7278 error( RtAudioError::INVALID_USE );
7282 if ( device >= nDevices ) {
7283 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7284 error( RtAudioError::INVALID_USE );
7290 // If a stream is already open, we cannot probe the stream devices.
7291 // Thus, use the saved results.
7292 if ( stream_.state != STREAM_CLOSED &&
7293 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7294 snd_ctl_close( chandle );
7295 if ( device >= devices_.size() ) {
7296 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7297 error( RtAudioError::WARNING );
7300 return devices_[ device ];
7303 int openMode = SND_PCM_ASYNC;
7304 snd_pcm_stream_t stream;
7305 snd_pcm_info_t *pcminfo;
7306 snd_pcm_info_alloca( &pcminfo );
7308 snd_pcm_hw_params_t *params;
7309 snd_pcm_hw_params_alloca( ¶ms );
7311 // First try for playback unless default device (which has subdev -1)
7312 stream = SND_PCM_STREAM_PLAYBACK;
7313 snd_pcm_info_set_stream( pcminfo, stream );
7314 if ( subdevice != -1 ) {
7315 snd_pcm_info_set_device( pcminfo, subdevice );
7316 snd_pcm_info_set_subdevice( pcminfo, 0 );
7318 result = snd_ctl_pcm_info( chandle, pcminfo );
7320 // Device probably doesn't support playback.
7325 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7327 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7328 errorText_ = errorStream_.str();
7329 error( RtAudioError::WARNING );
7333 // The device is open ... fill the parameter structure.
7334 result = snd_pcm_hw_params_any( phandle, params );
7336 snd_pcm_close( phandle );
7337 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7338 errorText_ = errorStream_.str();
7339 error( RtAudioError::WARNING );
7343 // Get output channel information.
7345 result = snd_pcm_hw_params_get_channels_max( params, &value );
7347 snd_pcm_close( phandle );
7348 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7349 errorText_ = errorStream_.str();
7350 error( RtAudioError::WARNING );
7353 info.outputChannels = value;
7354 snd_pcm_close( phandle );
7357 stream = SND_PCM_STREAM_CAPTURE;
7358 snd_pcm_info_set_stream( pcminfo, stream );
7360 // Now try for capture unless default device (with subdev = -1)
7361 if ( subdevice != -1 ) {
7362 result = snd_ctl_pcm_info( chandle, pcminfo );
7363 snd_ctl_close( chandle );
7365 // Device probably doesn't support capture.
7366 if ( info.outputChannels == 0 ) return info;
7367 goto probeParameters;
7371 snd_ctl_close( chandle );
7373 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7375 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7376 errorText_ = errorStream_.str();
7377 error( RtAudioError::WARNING );
7378 if ( info.outputChannels == 0 ) return info;
7379 goto probeParameters;
7382 // The device is open ... fill the parameter structure.
7383 result = snd_pcm_hw_params_any( phandle, params );
7385 snd_pcm_close( phandle );
7386 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7387 errorText_ = errorStream_.str();
7388 error( RtAudioError::WARNING );
7389 if ( info.outputChannels == 0 ) return info;
7390 goto probeParameters;
7393 result = snd_pcm_hw_params_get_channels_max( params, &value );
7395 snd_pcm_close( phandle );
7396 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7397 errorText_ = errorStream_.str();
7398 error( RtAudioError::WARNING );
7399 if ( info.outputChannels == 0 ) return info;
7400 goto probeParameters;
7402 info.inputChannels = value;
7403 snd_pcm_close( phandle );
7405 // If device opens for both playback and capture, we determine the channels.
7406 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7407 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7409 // ALSA doesn't provide default devices so we'll use the first available one.
7410 if ( device == 0 && info.outputChannels > 0 )
7411 info.isDefaultOutput = true;
7412 if ( device == 0 && info.inputChannels > 0 )
7413 info.isDefaultInput = true;
7416 // At this point, we just need to figure out the supported data
7417 // formats and sample rates. We'll proceed by opening the device in
7418 // the direction with the maximum number of channels, or playback if
7419 // they are equal. This might limit our sample rate options, but so
7422 if ( info.outputChannels >= info.inputChannels )
7423 stream = SND_PCM_STREAM_PLAYBACK;
7425 stream = SND_PCM_STREAM_CAPTURE;
7426 snd_pcm_info_set_stream( pcminfo, stream );
7428 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7430 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7431 errorText_ = errorStream_.str();
7432 error( RtAudioError::WARNING );
7436 // The device is open ... fill the parameter structure.
7437 result = snd_pcm_hw_params_any( phandle, params );
7439 snd_pcm_close( phandle );
7440 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7441 errorText_ = errorStream_.str();
7442 error( RtAudioError::WARNING );
7446 // Test our discrete set of sample rate values.
7447 info.sampleRates.clear();
7448 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7449 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7450 info.sampleRates.push_back( SAMPLE_RATES[i] );
7452 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7453 info.preferredSampleRate = SAMPLE_RATES[i];
7456 if ( info.sampleRates.size() == 0 ) {
7457 snd_pcm_close( phandle );
7458 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7459 errorText_ = errorStream_.str();
7460 error( RtAudioError::WARNING );
7464 // Probe the supported data formats ... we don't care about endian-ness just yet
7465 snd_pcm_format_t format;
7466 info.nativeFormats = 0;
7467 format = SND_PCM_FORMAT_S8;
7468 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7469 info.nativeFormats |= RTAUDIO_SINT8;
7470 format = SND_PCM_FORMAT_S16;
7471 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7472 info.nativeFormats |= RTAUDIO_SINT16;
7473 format = SND_PCM_FORMAT_S24;
7474 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7475 info.nativeFormats |= RTAUDIO_SINT24;
7476 format = SND_PCM_FORMAT_S32;
7477 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7478 info.nativeFormats |= RTAUDIO_SINT32;
7479 format = SND_PCM_FORMAT_FLOAT;
7480 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7481 info.nativeFormats |= RTAUDIO_FLOAT32;
7482 format = SND_PCM_FORMAT_FLOAT64;
7483 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7484 info.nativeFormats |= RTAUDIO_FLOAT64;
7486 // Check that we have at least one supported format
7487 if ( info.nativeFormats == 0 ) {
7488 snd_pcm_close( phandle );
7489 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7490 errorText_ = errorStream_.str();
7491 error( RtAudioError::WARNING );
7495 // Get the device name
7497 result = snd_card_get_name( card, &cardname );
7498 if ( result >= 0 ) {
7499 sprintf( name, "hw:%s,%d", cardname, subdevice );
7504 // That's all ... close the device and return
7505 snd_pcm_close( phandle );
7510 void RtApiAlsa :: saveDeviceInfo( void )
7514 unsigned int nDevices = getDeviceCount();
7515 devices_.resize( nDevices );
7516 for ( unsigned int i=0; i<nDevices; i++ )
7517 devices_[i] = getDeviceInfo( i );
7520 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7521 unsigned int firstChannel, unsigned int sampleRate,
7522 RtAudioFormat format, unsigned int *bufferSize,
7523 RtAudio::StreamOptions *options )
7526 #if defined(__RTAUDIO_DEBUG__)
7528 snd_output_stdio_attach(&out, stderr, 0);
7531 // I'm not using the "plug" interface ... too much inconsistent behavior.
7533 unsigned nDevices = 0;
7534 int result, subdevice, card;
7538 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7539 snprintf(name, sizeof(name), "%s", "default");
7541 // Count cards and devices
7543 snd_card_next( &card );
7544 while ( card >= 0 ) {
7545 sprintf( name, "hw:%d", card );
7546 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7548 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7549 errorText_ = errorStream_.str();
7554 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7555 if ( result < 0 ) break;
7556 if ( subdevice < 0 ) break;
7557 if ( nDevices == device ) {
7558 sprintf( name, "hw:%d,%d", card, subdevice );
7559 snd_ctl_close( chandle );
7564 snd_ctl_close( chandle );
7565 snd_card_next( &card );
7568 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7569 if ( result == 0 ) {
7570 if ( nDevices == device ) {
7571 strcpy( name, "default" );
7572 snd_ctl_close( chandle );
7577 snd_ctl_close( chandle );
7579 if ( nDevices == 0 ) {
7580 // This should not happen because a check is made before this function is called.
7581 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7585 if ( device >= nDevices ) {
7586 // This should not happen because a check is made before this function is called.
7587 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7594 // The getDeviceInfo() function will not work for a device that is
7595 // already open. Thus, we'll probe the system before opening a
7596 // stream and save the results for use by getDeviceInfo().
7597 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7598 this->saveDeviceInfo();
7600 snd_pcm_stream_t stream;
7601 if ( mode == OUTPUT )
7602 stream = SND_PCM_STREAM_PLAYBACK;
7604 stream = SND_PCM_STREAM_CAPTURE;
7607 int openMode = SND_PCM_ASYNC;
7608 result = snd_pcm_open( &phandle, name, stream, openMode );
7610 if ( mode == OUTPUT )
7611 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7613 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7614 errorText_ = errorStream_.str();
7618 // Fill the parameter structure.
7619 snd_pcm_hw_params_t *hw_params;
7620 snd_pcm_hw_params_alloca( &hw_params );
7621 result = snd_pcm_hw_params_any( phandle, hw_params );
7623 snd_pcm_close( phandle );
7624 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7625 errorText_ = errorStream_.str();
7629 #if defined(__RTAUDIO_DEBUG__)
7630 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7631 snd_pcm_hw_params_dump( hw_params, out );
7634 // Set access ... check user preference.
7635 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7636 stream_.userInterleaved = false;
7637 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7639 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7640 stream_.deviceInterleaved[mode] = true;
7643 stream_.deviceInterleaved[mode] = false;
7646 stream_.userInterleaved = true;
7647 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7649 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7650 stream_.deviceInterleaved[mode] = false;
7653 stream_.deviceInterleaved[mode] = true;
7657 snd_pcm_close( phandle );
7658 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7659 errorText_ = errorStream_.str();
7663 // Determine how to set the device format.
7664 stream_.userFormat = format;
7665 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7667 if ( format == RTAUDIO_SINT8 )
7668 deviceFormat = SND_PCM_FORMAT_S8;
7669 else if ( format == RTAUDIO_SINT16 )
7670 deviceFormat = SND_PCM_FORMAT_S16;
7671 else if ( format == RTAUDIO_SINT24 )
7672 deviceFormat = SND_PCM_FORMAT_S24;
7673 else if ( format == RTAUDIO_SINT32 )
7674 deviceFormat = SND_PCM_FORMAT_S32;
7675 else if ( format == RTAUDIO_FLOAT32 )
7676 deviceFormat = SND_PCM_FORMAT_FLOAT;
7677 else if ( format == RTAUDIO_FLOAT64 )
7678 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7680 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7681 stream_.deviceFormat[mode] = format;
7685 // The user requested format is not natively supported by the device.
7686 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7687 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7688 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7692 deviceFormat = SND_PCM_FORMAT_FLOAT;
7693 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7694 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7698 deviceFormat = SND_PCM_FORMAT_S32;
7699 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7700 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7704 deviceFormat = SND_PCM_FORMAT_S24;
7705 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7706 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7710 deviceFormat = SND_PCM_FORMAT_S16;
7711 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7712 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7716 deviceFormat = SND_PCM_FORMAT_S8;
7717 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7718 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7722 // If we get here, no supported format was found.
7723 snd_pcm_close( phandle );
7724 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7725 errorText_ = errorStream_.str();
7729 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7731 snd_pcm_close( phandle );
7732 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7733 errorText_ = errorStream_.str();
7737 // Determine whether byte-swaping is necessary.
7738 stream_.doByteSwap[mode] = false;
7739 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7740 result = snd_pcm_format_cpu_endian( deviceFormat );
7742 stream_.doByteSwap[mode] = true;
7743 else if (result < 0) {
7744 snd_pcm_close( phandle );
7745 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7746 errorText_ = errorStream_.str();
7751 // Set the sample rate.
7752 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7754 snd_pcm_close( phandle );
7755 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7756 errorText_ = errorStream_.str();
7760 // Determine the number of channels for this device. We support a possible
7761 // minimum device channel number > than the value requested by the user.
7762 stream_.nUserChannels[mode] = channels;
7764 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7765 unsigned int deviceChannels = value;
7766 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7767 snd_pcm_close( phandle );
7768 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7769 errorText_ = errorStream_.str();
7773 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7775 snd_pcm_close( phandle );
7776 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7777 errorText_ = errorStream_.str();
7780 deviceChannels = value;
7781 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7782 stream_.nDeviceChannels[mode] = deviceChannels;
7784 // Set the device channels.
7785 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7787 snd_pcm_close( phandle );
7788 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7789 errorText_ = errorStream_.str();
7793 // Set the buffer (or period) size.
7795 snd_pcm_uframes_t periodSize = *bufferSize;
7796 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7798 snd_pcm_close( phandle );
7799 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7800 errorText_ = errorStream_.str();
7803 *bufferSize = periodSize;
7805 // Set the buffer number, which in ALSA is referred to as the "period".
7806 unsigned int periods = 0;
7807 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7808 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7809 if ( periods < 2 ) periods = 4; // a fairly safe default value
7810 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7812 snd_pcm_close( phandle );
7813 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7814 errorText_ = errorStream_.str();
7818 // If attempting to setup a duplex stream, the bufferSize parameter
7819 // MUST be the same in both directions!
7820 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7821 snd_pcm_close( phandle );
7822 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7823 errorText_ = errorStream_.str();
7827 stream_.bufferSize = *bufferSize;
7829 // Install the hardware configuration
7830 result = snd_pcm_hw_params( phandle, hw_params );
7832 snd_pcm_close( phandle );
7833 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7834 errorText_ = errorStream_.str();
7838 #if defined(__RTAUDIO_DEBUG__)
7839 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7840 snd_pcm_hw_params_dump( hw_params, out );
7843 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7844 snd_pcm_sw_params_t *sw_params = NULL;
7845 snd_pcm_sw_params_alloca( &sw_params );
7846 snd_pcm_sw_params_current( phandle, sw_params );
7847 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7848 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7849 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7851 // The following two settings were suggested by Theo Veenker
7852 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7853 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7855 // here are two options for a fix
7856 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7857 snd_pcm_uframes_t val;
7858 snd_pcm_sw_params_get_boundary( sw_params, &val );
7859 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7861 result = snd_pcm_sw_params( phandle, sw_params );
7863 snd_pcm_close( phandle );
7864 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7865 errorText_ = errorStream_.str();
7869 #if defined(__RTAUDIO_DEBUG__)
7870 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7871 snd_pcm_sw_params_dump( sw_params, out );
7874 // Set flags for buffer conversion
7875 stream_.doConvertBuffer[mode] = false;
7876 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7877 stream_.doConvertBuffer[mode] = true;
7878 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7879 stream_.doConvertBuffer[mode] = true;
7880 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7881 stream_.nUserChannels[mode] > 1 )
7882 stream_.doConvertBuffer[mode] = true;
7884 // Allocate the ApiHandle if necessary and then save.
7885 AlsaHandle *apiInfo = 0;
7886 if ( stream_.apiHandle == 0 ) {
7888 apiInfo = (AlsaHandle *) new AlsaHandle;
7890 catch ( std::bad_alloc& ) {
7891 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7895 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7896 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7900 stream_.apiHandle = (void *) apiInfo;
7901 apiInfo->handles[0] = 0;
7902 apiInfo->handles[1] = 0;
7905 apiInfo = (AlsaHandle *) stream_.apiHandle;
7907 apiInfo->handles[mode] = phandle;
7910 // Allocate necessary internal buffers.
7911 unsigned long bufferBytes;
7912 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7913 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7914 if ( stream_.userBuffer[mode] == NULL ) {
7915 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7919 if ( stream_.doConvertBuffer[mode] ) {
7921 bool makeBuffer = true;
7922 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7923 if ( mode == INPUT ) {
7924 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7925 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7926 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7931 bufferBytes *= *bufferSize;
7932 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7933 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7934 if ( stream_.deviceBuffer == NULL ) {
7935 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7941 stream_.sampleRate = sampleRate;
7942 stream_.nBuffers = periods;
7943 stream_.device[mode] = device;
7944 stream_.state = STREAM_STOPPED;
7946 // Setup the buffer conversion information structure.
7947 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7949 // Setup thread if necessary.
7950 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7951 // We had already set up an output stream.
7952 stream_.mode = DUPLEX;
7953 // Link the streams if possible.
7954 apiInfo->synchronized = false;
7955 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7956 apiInfo->synchronized = true;
7958 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7959 error( RtAudioError::WARNING );
7963 stream_.mode = mode;
7965 // Setup callback thread.
7966 stream_.callbackInfo.object = (void *) this;
7968 // Set the thread attributes for joinable and realtime scheduling
7969 // priority (optional). The higher priority will only take affect
7970 // if the program is run as root or suid. Note, under Linux
7971 // processes with CAP_SYS_NICE privilege, a user can change
7972 // scheduling policy and priority (thus need not be root). See
7973 // POSIX "capabilities".
7974 pthread_attr_t attr;
7975 pthread_attr_init( &attr );
7976 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7977 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7978 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7979 stream_.callbackInfo.doRealtime = true;
7980 struct sched_param param;
7981 int priority = options->priority;
7982 int min = sched_get_priority_min( SCHED_RR );
7983 int max = sched_get_priority_max( SCHED_RR );
7984 if ( priority < min ) priority = min;
7985 else if ( priority > max ) priority = max;
7986 param.sched_priority = priority;
7988 // Set the policy BEFORE the priority. Otherwise it fails.
7989 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7990 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7991 // This is definitely required. Otherwise it fails.
7992 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7993 pthread_attr_setschedparam(&attr, ¶m);
7996 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7998 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8001 stream_.callbackInfo.isRunning = true;
8002 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
8003 pthread_attr_destroy( &attr );
8005 // Failed. Try instead with default attributes.
8006 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
8008 stream_.callbackInfo.isRunning = false;
8009 errorText_ = "RtApiAlsa::error creating callback thread!";
8019 pthread_cond_destroy( &apiInfo->runnable_cv );
8020 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8021 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8023 stream_.apiHandle = 0;
8026 if ( phandle) snd_pcm_close( phandle );
8028 for ( int i=0; i<2; i++ ) {
8029 if ( stream_.userBuffer[i] ) {
8030 free( stream_.userBuffer[i] );
8031 stream_.userBuffer[i] = 0;
8035 if ( stream_.deviceBuffer ) {
8036 free( stream_.deviceBuffer );
8037 stream_.deviceBuffer = 0;
8040 stream_.state = STREAM_CLOSED;
// Close the ALSA stream: wake and join the callback thread, stop any
// running PCM(s), close the device handles, and free all buffers.
// NOTE(review): the embedded line numbers in this listing are gapped
// (e.g. 8048 -> 8052), so some statements (returns/closing braces)
// appear to be missing from this excerpt — confirm against upstream.
8044 void RtApiAlsa :: closeStream()
// Guard: nothing to close if no stream is open.
8046 if ( stream_.state == STREAM_CLOSED ) {
8047 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8048 error( RtAudioError::WARNING );
// Tell the callback loop to exit, then wake it if it is parked on the
// runnable condition variable so the join below cannot deadlock.
8052 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8053 stream_.callbackInfo.isRunning = false;
8054 MUTEX_LOCK( &stream_.mutex );
8055 if ( stream_.state == STREAM_STOPPED ) {
8056 apiInfo->runnable = true;
8057 pthread_cond_signal( &apiInfo->runnable_cv );
8059 MUTEX_UNLOCK( &stream_.mutex );
8060 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, drop (abort) playback/capture immediately.
8062 if ( stream_.state == STREAM_RUNNING ) {
8063 stream_.state = STREAM_STOPPED;
8064 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8065 snd_pcm_drop( apiInfo->handles[0] );
8066 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8067 snd_pcm_drop( apiInfo->handles[1] );
// Destroy the condition variable and close both PCM handles
// (handles[0] = playback, handles[1] = capture).
8071 pthread_cond_destroy( &apiInfo->runnable_cv );
8072 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8073 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8075 stream_.apiHandle = 0;
// Release the per-direction user buffers and the shared device buffer.
8078 for ( int i=0; i<2; i++ ) {
8079 if ( stream_.userBuffer[i] ) {
8080 free( stream_.userBuffer[i] );
8081 stream_.userBuffer[i] = 0;
8085 if ( stream_.deviceBuffer ) {
8086 free( stream_.deviceBuffer );
8087 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed state.
8090 stream_.mode = UNINITIALIZED;
8091 stream_.state = STREAM_CLOSED;
// Start (or restart) the ALSA stream: prepare the PCM device(s) if not
// already prepared, mark the stream running, and signal the callback
// thread parked on the runnable condition variable.
8094 void RtApiAlsa :: startStream()
8096 // This method calls snd_pcm_prepare if the device isn't already in that state.
8099 if ( stream_.state == STREAM_RUNNING ) {
8100 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8101 error( RtAudioError::WARNING );
8105 MUTEX_LOCK( &stream_.mutex );
// Record the start timestamp used as the basis for stream-time reporting.
8107 #if defined( HAVE_GETTIMEOFDAY )
8108 gettimeofday( &stream_.lastTickTimestamp, NULL );
8112 snd_pcm_state_t state;
8113 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8114 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback PCM (handle[0]) if needed.
8115 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8116 state = snd_pcm_state( handle[0] );
8117 if ( state != SND_PCM_STATE_PREPARED ) {
8118 result = snd_pcm_prepare( handle[0] );
8120 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8121 errorText_ = errorStream_.str();
// Prepare the capture PCM (handle[1]); skipped when the two PCMs are
// linked (synchronized) — presumably preparing one prepares both.
8127 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8128 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8129 state = snd_pcm_state( handle[1] );
8130 if ( state != SND_PCM_STATE_PREPARED ) {
8131 result = snd_pcm_prepare( handle[1] );
8133 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8134 errorText_ = errorStream_.str();
8140 stream_.state = STREAM_RUNNING;
// Wake the callback thread waiting in callbackEvent().
8143 apiInfo->runnable = true;
8144 pthread_cond_signal( &apiInfo->runnable_cv );
8145 MUTEX_UNLOCK( &stream_.mutex );
8147 if ( result >= 0 ) return;
8148 error( RtAudioError::SYSTEM_ERROR );
// Stop the ALSA stream gracefully: drain pending output (or drop when
// the PCMs are linked), discard pending input, and park the callback
// thread by clearing the runnable flag.
8151 void RtApiAlsa :: stopStream()
8154 if ( stream_.state == STREAM_STOPPED ) {
8155 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8156 error( RtAudioError::WARNING );
8160 stream_.state = STREAM_STOPPED;
8161 MUTEX_LOCK( &stream_.mutex );
8164 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8165 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8166 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked PCMs are dropped immediately; otherwise drain so queued
// output frames are played before the device stops.
8167 if ( apiInfo->synchronized )
8168 result = snd_pcm_drop( handle[0] );
8170 result = snd_pcm_drain( handle[0] );
8172 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8173 errorText_ = errorStream_.str();
// Capture side: discard any frames still queued in the device.
8178 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8179 result = snd_pcm_drop( handle[1] );
8181 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8182 errorText_ = errorStream_.str();
8188 apiInfo->runnable = false; // fixes high CPU usage when stopped
8189 MUTEX_UNLOCK( &stream_.mutex );
8191 if ( result >= 0 ) return;
8192 error( RtAudioError::SYSTEM_ERROR );
// Abort the ALSA stream: identical in structure to stopStream() but
// always drops (discards) pending frames instead of draining output.
8195 void RtApiAlsa :: abortStream()
8198 if ( stream_.state == STREAM_STOPPED ) {
8199 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8200 error( RtAudioError::WARNING );
8204 stream_.state = STREAM_STOPPED;
8205 MUTEX_LOCK( &stream_.mutex );
8208 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8209 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8210 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8211 result = snd_pcm_drop( handle[0] );
8213 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8214 errorText_ = errorStream_.str();
// Capture side is dropped separately unless the PCMs are linked.
8219 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8220 result = snd_pcm_drop( handle[1] );
8222 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8223 errorText_ = errorStream_.str();
8229 apiInfo->runnable = false; // fixes high CPU usage when stopped
8230 MUTEX_UNLOCK( &stream_.mutex );
8232 if ( result >= 0 ) return;
8233 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: block while the stream is
// stopped, invoke the user callback, then read capture frames and/or
// write playback frames, re-preparing the device after an xrun (-EPIPE).
8236 void RtApiAlsa :: callbackEvent()
8238 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, park on the runnable condition variable until
// startStream()/closeStream() signals us.
8239 if ( stream_.state == STREAM_STOPPED ) {
8240 MUTEX_LOCK( &stream_.mutex );
8241 while ( !apiInfo->runnable )
8242 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8244 if ( stream_.state != STREAM_RUNNING ) {
8245 MUTEX_UNLOCK( &stream_.mutex );
8248 MUTEX_UNLOCK( &stream_.mutex );
8251 if ( stream_.state == STREAM_CLOSED ) {
8252 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8253 error( RtAudioError::WARNING );
// Report any xrun recorded since the last callback via the status flags,
// then clear the latched flag (xrun[0] = output, xrun[1] = input).
8257 int doStopStream = 0;
8258 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8259 double streamTime = getStreamTime();
8260 RtAudioStreamStatus status = 0;
8261 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8262 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8263 apiInfo->xrun[0] = false;
8265 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8266 status |= RTAUDIO_INPUT_OVERFLOW;
8267 apiInfo->xrun[1] = false;
// Invoke the user callback; return value 1 = stop, 2 = abort the stream.
8269 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8270 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8272 if ( doStopStream == 2 ) {
8277 MUTEX_LOCK( &stream_.mutex );
8279 // The state might change while waiting on a mutex.
8280 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8286 snd_pcm_sframes_t frames;
8287 RtAudioFormat format;
8288 handle = (snd_pcm_t **) apiInfo->handles;
// ----------------------- capture (input) side -----------------------
8290 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8292 // Setup parameters.
// Read into the conversion buffer when format/channel conversion is
// required, otherwise straight into the user buffer.
8293 if ( stream_.doConvertBuffer[1] ) {
8294 buffer = stream_.deviceBuffer;
8295 channels = stream_.nDeviceChannels[1];
8296 format = stream_.deviceFormat[1];
8299 buffer = stream_.userBuffer[1];
8300 channels = stream_.nUserChannels[1];
8301 format = stream_.userFormat;
8304 // Read samples from device in interleaved/non-interleaved format.
8305 if ( stream_.deviceInterleaved[1] )
8306 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one channel-pointer per plane for readn.
8308 void *bufs[channels];
8309 size_t offset = stream_.bufferSize * formatBytes( format );
8310 for ( int i=0; i<channels; i++ )
8311 bufs[i] = (void *) (buffer + (i * offset));
8312 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// A short read indicates either an error or an overrun.
8315 if ( result < (int) stream_.bufferSize ) {
8316 // Either an error or overrun occured.
8317 if ( result == -EPIPE ) {
8318 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8319 if ( state == SND_PCM_STATE_XRUN ) {
8320 apiInfo->xrun[1] = true;
// Re-prepare the device so capture can continue after the overrun.
8321 result = snd_pcm_prepare( handle[1] );
8323 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8324 errorText_ = errorStream_.str();
8328 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8329 errorText_ = errorStream_.str();
8333 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8334 errorText_ = errorStream_.str();
8336 error( RtAudioError::WARNING );
8340 // Do byte swapping if necessary.
8341 if ( stream_.doByteSwap[1] )
8342 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8344 // Do buffer conversion if necessary.
8345 if ( stream_.doConvertBuffer[1] )
8346 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8348 // Check stream latency
8349 result = snd_pcm_delay( handle[1], &frames );
8350 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ----------------------- playback (output) side ---------------------
8355 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8357 // Setup parameters and do buffer conversion if necessary.
8358 if ( stream_.doConvertBuffer[0] ) {
8359 buffer = stream_.deviceBuffer;
8360 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8361 channels = stream_.nDeviceChannels[0];
8362 format = stream_.deviceFormat[0];
8365 buffer = stream_.userBuffer[0];
8366 channels = stream_.nUserChannels[0];
8367 format = stream_.userFormat;
8370 // Do byte swapping if necessary.
8371 if ( stream_.doByteSwap[0] )
8372 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8374 // Write samples to device in interleaved/non-interleaved format.
8375 if ( stream_.deviceInterleaved[0] )
8376 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: build one channel-pointer per plane for writen.
8378 void *bufs[channels];
8379 size_t offset = stream_.bufferSize * formatBytes( format );
8380 for ( int i=0; i<channels; i++ )
8381 bufs[i] = (void *) (buffer + (i * offset));
8382 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
// A short write indicates either an error or an underrun.
8385 if ( result < (int) stream_.bufferSize ) {
8386 // Either an error or underrun occured.
8387 if ( result == -EPIPE ) {
8388 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8389 if ( state == SND_PCM_STATE_XRUN ) {
8390 apiInfo->xrun[0] = true;
// Re-prepare the device so playback can continue after the underrun.
8391 result = snd_pcm_prepare( handle[0] );
8393 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8394 errorText_ = errorStream_.str();
8397 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8400 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8401 errorText_ = errorStream_.str();
8405 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8406 errorText_ = errorStream_.str();
8408 error( RtAudioError::WARNING );
8412 // Check stream latency
8413 result = snd_pcm_delay( handle[0], &frames );
8414 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8418 MUTEX_UNLOCK( &stream_.mutex );
// Advance the reported stream time; honor a callback request to stop.
8420 RtApi::tickStreamTime();
8421 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: repeatedly calls
// RtApiAlsa::callbackEvent() until CallbackInfo::isRunning is cleared
// (by closeStream()), then exits the thread.
8424 static void *alsaCallbackHandler( void *ptr )
8426 CallbackInfo *info = (CallbackInfo *) ptr;
8427 RtApiAlsa *object = (RtApiAlsa *) info->object;
8428 bool *isRunning = &info->isRunning;
// Report whether the requested realtime (SCHED_RR) scheduling actually
// took effect for this thread.
8430 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8431 if ( info->doRealtime ) {
8432 std::cerr << "RtAudio alsa: " <<
8433 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8434 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
8438 while ( *isRunning == true ) {
8439 pthread_testcancel();
8440 object->callbackEvent();
8443 pthread_exit( NULL );
8446 //******************** End of __LINUX_ALSA__ *********************//
8449 #if defined(__LINUX_PULSE__)
8451 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8452 // and Tristan Matthews.
8454 #include <pulse/error.h>
8455 #include <pulse/simple.h>
// Sample rates offered by the PulseAudio backend (zero-terminated list).
8458 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8459 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to the equivalent PulseAudio format.
8461 struct rtaudio_pa_format_mapping_t {
8462 RtAudioFormat rtaudio_format;
8463 pa_sample_format_t pa_format;
// Natively supported format pairs; terminated by {0, PA_SAMPLE_INVALID}.
8466 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8467 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8468 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8469 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8470 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the playback (s_play) and
// record (s_rec) pa_simple connections, plus the runnable flag/condition
// variable used to park the callback thread while the stream is stopped.
8472 struct PulseAudioHandle {
8476 pthread_cond_t runnable_cv;
8478 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure an open stream is closed before the object dies.
8481 RtApiPulse::~RtApiPulse()
8483 if ( stream_.state != STREAM_CLOSED )
// Report the device count; PulseAudio is exposed as a single logical
// device here (probeDeviceOpen below rejects any device index != 0).
8487 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single PulseAudio "device": stereo in/out/duplex, default
// for both directions, with the fixed rate and format tables above.
8492 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8494 RtAudio::DeviceInfo info;
8496 info.name = "PulseAudio";
8497 info.outputChannels = 2;
8498 info.inputChannels = 2;
8499 info.duplexChannels = 2;
8500 info.isDefaultOutput = true;
8501 info.isDefaultInput = true;
// Advertise every rate from the zero-terminated SUPPORTED_SAMPLERATES.
8503 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8504 info.sampleRates.push_back( *sr );
8506 info.preferredSampleRate = 48000;
8507 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: repeatedly
// calls RtApiPulse::callbackEvent() until CallbackInfo::isRunning is
// cleared (by closeStream()), then exits the thread.
8512 static void *pulseaudio_callback( void * user )
8514 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8515 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8516 volatile bool *isRunning = &cbi->isRunning;
// Report whether the requested realtime (SCHED_RR) scheduling actually
// took effect for this thread.
8518 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8519 if (cbi->doRealtime) {
8520 std::cerr << "RtAudio pulse: " <<
8521 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8522 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
8526 while ( *isRunning ) {
8527 pthread_testcancel();
8528 context->callbackEvent();
8531 pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread, flush
// and free the pa_simple connections, and release the user buffers.
8534 void RtApiPulse::closeStream( void )
8536 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8538 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked on the condition variable,
// otherwise the join below would deadlock.
8540 MUTEX_LOCK( &stream_.mutex );
8541 if ( stream_.state == STREAM_STOPPED ) {
8542 pah->runnable = true;
8543 pthread_cond_signal( &pah->runnable_cv );
8545 MUTEX_UNLOCK( &stream_.mutex );
8547 pthread_join( pah->thread, 0 );
// Discard any queued playback data before freeing the connection.
8548 if ( pah->s_play ) {
8549 pa_simple_flush( pah->s_play, NULL );
8550 pa_simple_free( pah->s_play );
8553 pa_simple_free( pah->s_rec );
8555 pthread_cond_destroy( &pah->runnable_cv );
8557 stream_.apiHandle = 0;
// Release the per-direction user buffers.
8560 if ( stream_.userBuffer[0] ) {
8561 free( stream_.userBuffer[0] );
8562 stream_.userBuffer[0] = 0;
8564 if ( stream_.userBuffer[1] ) {
8565 free( stream_.userBuffer[1] );
8566 stream_.userBuffer[1] = 0;
// Reset stream bookkeeping to the closed state.
8569 stream_.state = STREAM_CLOSED;
8570 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: block while the stream
// is stopped, invoke the user callback, then write playback data to /
// read capture data from the pa_simple connections, converting sample
// formats when needed.
8573 void RtApiPulse::callbackEvent( void )
8575 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, park on the runnable condition variable until
// startStream()/closeStream() signals us.
8577 if ( stream_.state == STREAM_STOPPED ) {
8578 MUTEX_LOCK( &stream_.mutex );
8579 while ( !pah->runnable )
8580 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8582 if ( stream_.state != STREAM_RUNNING ) {
8583 MUTEX_UNLOCK( &stream_.mutex );
8586 MUTEX_UNLOCK( &stream_.mutex );
8589 if ( stream_.state == STREAM_CLOSED ) {
8590 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8591 "this shouldn't happen!";
8592 error( RtAudioError::WARNING );
// Invoke the user callback; return value 1 = stop, 2 = abort the stream.
8596 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8597 double streamTime = getStreamTime();
8598 RtAudioStreamStatus status = 0;
8599 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8600 stream_.bufferSize, streamTime, status,
8601 stream_.callbackInfo.userData );
8603 if ( doStopStream == 2 ) {
// Choose the buffers handed to PulseAudio: the shared conversion buffer
// when format conversion is enabled, the user buffer otherwise.
8608 MUTEX_LOCK( &stream_.mutex );
8609 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8610 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8612 if ( stream_.state != STREAM_RUNNING )
// Playback: convert first (if needed), then write one buffer of audio.
8617 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8618 if ( stream_.doConvertBuffer[OUTPUT] ) {
8619 convertBuffer( stream_.deviceBuffer,
8620 stream_.userBuffer[OUTPUT],
8621 stream_.convertInfo[OUTPUT] );
8622 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8623 formatBytes( stream_.deviceFormat[OUTPUT] );
8625 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8626 formatBytes( stream_.userFormat );
8628 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8629 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8630 pa_strerror( pa_error ) << ".";
8631 errorText_ = errorStream_.str();
8632 error( RtAudioError::WARNING );
// Capture: read one buffer of audio, then convert (if needed).
8636 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8637 if ( stream_.doConvertBuffer[INPUT] )
8638 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8639 formatBytes( stream_.deviceFormat[INPUT] );
8641 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8642 formatBytes( stream_.userFormat );
8644 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8645 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8646 pa_strerror( pa_error ) << ".";
8647 errorText_ = errorStream_.str();
8648 error( RtAudioError::WARNING );
8650 if ( stream_.doConvertBuffer[INPUT] ) {
8651 convertBuffer( stream_.userBuffer[INPUT],
8652 stream_.deviceBuffer,
8653 stream_.convertInfo[INPUT] );
// Advance the reported stream time; honor a callback request to stop.
8658 MUTEX_UNLOCK( &stream_.mutex );
8659 RtApi::tickStreamTime();
8661 if ( doStopStream == 1 )
// Start the PulseAudio stream: validate state, stamp the stream-time
// base, mark the stream running, and wake the parked callback thread.
8665 void RtApiPulse::startStream( void )
8667 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8669 if ( stream_.state == STREAM_CLOSED ) {
8670 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8671 error( RtAudioError::INVALID_USE );
8674 if ( stream_.state == STREAM_RUNNING ) {
8675 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8676 error( RtAudioError::WARNING );
8680 MUTEX_LOCK( &stream_.mutex );
// Record the start timestamp used as the basis for stream-time reporting.
8682 #if defined( HAVE_GETTIMEOFDAY )
8683 gettimeofday( &stream_.lastTickTimestamp, NULL );
8686 stream_.state = STREAM_RUNNING;
// Wake the callback thread waiting in callbackEvent().
8688 pah->runnable = true;
8689 pthread_cond_signal( &pah->runnable_cv );
8690 MUTEX_UNLOCK( &stream_.mutex );
// Stop the PulseAudio stream gracefully: drain queued playback data so
// it is heard before the stream goes quiet, then mark it stopped.
8693 void RtApiPulse::stopStream( void )
8695 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8697 if ( stream_.state == STREAM_CLOSED ) {
8698 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8699 error( RtAudioError::INVALID_USE );
8702 if ( stream_.state == STREAM_STOPPED ) {
8703 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8704 error( RtAudioError::WARNING );
8708 stream_.state = STREAM_STOPPED;
8709 MUTEX_LOCK( &stream_.mutex );
// pa_simple_drain blocks until all queued samples have been played.
8711 if ( pah && pah->s_play ) {
8713 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8714 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8715 pa_strerror( pa_error ) << ".";
8716 errorText_ = errorStream_.str();
// Release the mutex before raising so the error path does not hold it.
8717 MUTEX_UNLOCK( &stream_.mutex );
8718 error( RtAudioError::SYSTEM_ERROR );
8723 stream_.state = STREAM_STOPPED;
8724 MUTEX_UNLOCK( &stream_.mutex );
// Abort the PulseAudio stream: like stopStream() but flushes (discards)
// queued playback data instead of draining it.
8727 void RtApiPulse::abortStream( void )
8729 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8731 if ( stream_.state == STREAM_CLOSED ) {
8732 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8733 error( RtAudioError::INVALID_USE );
8736 if ( stream_.state == STREAM_STOPPED ) {
8737 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8738 error( RtAudioError::WARNING );
8742 stream_.state = STREAM_STOPPED;
8743 MUTEX_LOCK( &stream_.mutex );
// pa_simple_flush discards any queued playback samples immediately.
8745 if ( pah && pah->s_play ) {
8747 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8748 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8749 pa_strerror( pa_error ) << ".";
8750 errorText_ = errorStream_.str();
// Release the mutex before raising so the error path does not hold it.
8751 MUTEX_UNLOCK( &stream_.mutex );
8752 error( RtAudioError::SYSTEM_ERROR );
8757 stream_.state = STREAM_STOPPED;
8758 MUTEX_UNLOCK( &stream_.mutex );
8761 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8762 unsigned int channels, unsigned int firstChannel,
8763 unsigned int sampleRate, RtAudioFormat format,
8764 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8766 PulseAudioHandle *pah = 0;
8767 unsigned long bufferBytes = 0;
8770 if ( device != 0 ) return false;
8771 if ( mode != INPUT && mode != OUTPUT ) return false;
8772 if ( channels != 1 && channels != 2 ) {
8773 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8776 ss.channels = channels;
8778 if ( firstChannel != 0 ) return false;
8780 bool sr_found = false;
8781 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8782 if ( sampleRate == *sr ) {
8784 stream_.sampleRate = sampleRate;
8785 ss.rate = sampleRate;
8790 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8795 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8796 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8797 if ( format == sf->rtaudio_format ) {
8799 stream_.userFormat = sf->rtaudio_format;
8800 stream_.deviceFormat[mode] = stream_.userFormat;
8801 ss.format = sf->pa_format;
8805 if ( !sf_found ) { // Use internal data format conversion.
8806 stream_.userFormat = format;
8807 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8808 ss.format = PA_SAMPLE_FLOAT32LE;
8811 // Set other stream parameters.
8812 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8813 else stream_.userInterleaved = true;
8814 stream_.deviceInterleaved[mode] = true;
8815 stream_.nBuffers = 1;
8816 stream_.doByteSwap[mode] = false;
8817 stream_.nUserChannels[mode] = channels;
8818 stream_.nDeviceChannels[mode] = channels + firstChannel;
8819 stream_.channelOffset[mode] = 0;
8820 std::string streamName = "RtAudio";
8822 // Set flags for buffer conversion.
8823 stream_.doConvertBuffer[mode] = false;
8824 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8825 stream_.doConvertBuffer[mode] = true;
8826 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8827 stream_.doConvertBuffer[mode] = true;
8829 // Allocate necessary internal buffers.
8830 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8831 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8832 if ( stream_.userBuffer[mode] == NULL ) {
8833 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8836 stream_.bufferSize = *bufferSize;
8838 if ( stream_.doConvertBuffer[mode] ) {
8840 bool makeBuffer = true;
8841 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8842 if ( mode == INPUT ) {
8843 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8844 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8845 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8850 bufferBytes *= *bufferSize;
8851 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8852 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8853 if ( stream_.deviceBuffer == NULL ) {
8854 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8860 stream_.device[mode] = device;
8862 // Setup the buffer conversion information structure.
8863 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8865 if ( !stream_.apiHandle ) {
8866 PulseAudioHandle *pah = new PulseAudioHandle;
8868 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8872 stream_.apiHandle = pah;
8873 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8874 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8878 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8881 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8884 pa_buffer_attr buffer_attr;
8885 buffer_attr.fragsize = bufferBytes;
8886 buffer_attr.maxlength = -1;
8888 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8889 if ( !pah->s_rec ) {
8890 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8895 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8896 if ( !pah->s_play ) {
8897 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8905 if ( stream_.mode == UNINITIALIZED )
8906 stream_.mode = mode;
8907 else if ( stream_.mode == mode )
8910 stream_.mode = DUPLEX;
8912 if ( !stream_.callbackInfo.isRunning ) {
8913 stream_.callbackInfo.object = this;
8915 stream_.state = STREAM_STOPPED;
8916 // Set the thread attributes for joinable and realtime scheduling
8917 // priority (optional). The higher priority will only take affect
8918 // if the program is run as root or suid. Note, under Linux
8919 // processes with CAP_SYS_NICE privilege, a user can change
8920 // scheduling policy and priority (thus need not be root). See
8921 // POSIX "capabilities".
8922 pthread_attr_t attr;
8923 pthread_attr_init( &attr );
8924 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8925 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8926 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8927 stream_.callbackInfo.doRealtime = true;
8928 struct sched_param param;
8929 int priority = options->priority;
8930 int min = sched_get_priority_min( SCHED_RR );
8931 int max = sched_get_priority_max( SCHED_RR );
8932 if ( priority < min ) priority = min;
8933 else if ( priority > max ) priority = max;
8934 param.sched_priority = priority;
8936 // Set the policy BEFORE the priority. Otherwise it fails.
8937 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8938 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8939 // This is definitely required. Otherwise it fails.
8940 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8941 pthread_attr_setschedparam(&attr, ¶m);
8944 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8946 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8949 stream_.callbackInfo.isRunning = true;
8950 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8951 pthread_attr_destroy(&attr);
8953 // Failed. Try instead with default attributes.
8954 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8956 stream_.callbackInfo.isRunning = false;
8957 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8966 if ( pah && stream_.callbackInfo.isRunning ) {
8967 pthread_cond_destroy( &pah->runnable_cv );
8969 stream_.apiHandle = 0;
8972 for ( int i=0; i<2; i++ ) {
8973 if ( stream_.userBuffer[i] ) {
8974 free( stream_.userBuffer[i] );
8975 stream_.userBuffer[i] = 0;
8979 if ( stream_.deviceBuffer ) {
8980 free( stream_.deviceBuffer );
8981 stream_.deviceBuffer = 0;
8984 stream_.state = STREAM_CLOSED;
8988 //******************** End of __LINUX_PULSE__ *********************//
8991 #if defined(__LINUX_OSS__)
8994 #include <sys/ioctl.h>
8997 #include <sys/soundcard.h>
9001 static void *ossCallbackHandler(void * ptr);
9003 // A structure to hold various information related to the OSS API
// OssHandle: per-stream bookkeeping for the OSS backend.
//   id[0]/id[1]  - /dev/dsp file descriptors for playback (index 0) and
//                  capture (index 1); 0 means "not open".
//   runnable     - condition variable used to park/wake the callback thread
//                  while the stream is stopped.
//   triggered    - whether the duplex SETTRIGGER handshake has been done
//                  (see callbackEvent).
// NOTE(review): the "struct OssHandle {" header, the xrun[2] member and the
// closing brace are not visible in this extract (internal numbering gaps) —
// confirm against the full file before editing.
9006 int id[2]; // device ids
9009 pthread_cond_t runnable;
9012 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
9015 RtApiOss :: RtApiOss()
// Default constructor: all stream state is initialized by the RtApi base
// class, so there is no OSS-specific setup.
9017 // Nothing to do here.
9020 RtApiOss :: ~RtApiOss()
// Destructor: close any still-open stream so the device fds, the callback
// thread and the internal buffers are released (closeStream handles all of it).
9022 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices reported by the mixer, or warn and
// (presumably, in the elided lines) return 0 when /dev/mixer cannot be opened
// or queried. Requires OSS v4+ for the SNDCTL_SYSINFO ioctl.
// NOTE(review): the close( mixerfd ) calls and early returns for the error
// paths are elided in this extract — confirm the fd is closed on all paths.
9025 unsigned int RtApiOss :: getDeviceCount( void )
9027 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9028 if ( mixerfd == -1 ) {
9029 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9030 error( RtAudioError::WARNING );
9034 oss_sysinfo sysinfo;
9035 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9037 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9038 error( RtAudioError::WARNING );
// numaudios is the system-wide count of audio devices (OSS v4 sysinfo).
9043 return sysinfo.numaudios;
// Probe one OSS device (by index) via /dev/mixer and fill in an
// RtAudio::DeviceInfo: channel counts, native sample formats, and the list of
// supported sample rates (with a preferred rate <= 48 kHz when possible).
// Errors are reported through error() as warnings (bad device index raises
// INVALID_USE); the early returns for those paths are elided in this extract.
9046 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9048 RtAudio::DeviceInfo info;
// probed stays false until the device has been fully queried.
9049 info.probed = false;
9051 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9052 if ( mixerfd == -1 ) {
9053 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9054 error( RtAudioError::WARNING );
9058 oss_sysinfo sysinfo;
9059 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9060 if ( result == -1 ) {
9062 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9063 error( RtAudioError::WARNING );
9067 unsigned nDevices = sysinfo.numaudios;
9068 if ( nDevices == 0 ) {
9070 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9071 error( RtAudioError::INVALID_USE );
9075 if ( device >= nDevices ) {
9077 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9078 error( RtAudioError::INVALID_USE );
// Query per-device capabilities (the "ainfo.dev = device" assignment appears
// to be elided here — SNDCTL_AUDIOINFO needs the device index set first).
9082 oss_audioinfo ainfo;
9084 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9086 if ( result == -1 ) {
9087 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9088 errorText_ = errorStream_.str();
9089 error( RtAudioError::WARNING );
// Channel capabilities: max_channels applies to whichever directions the
// device supports; duplex channel count is the min of the two.
9094 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9095 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9096 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9097 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9098 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9101 // Probe data formats ... do for input
9102 unsigned long mask = ainfo.iformats;
9103 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9104 info.nativeFormats |= RTAUDIO_SINT16;
9105 if ( mask & AFMT_S8 )
9106 info.nativeFormats |= RTAUDIO_SINT8;
9107 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9108 info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT / AFMT_S24_* are conditional in the full source (#ifdef guards
// appear to be elided around these tests).
9110 if ( mask & AFMT_FLOAT )
9111 info.nativeFormats |= RTAUDIO_FLOAT32;
9113 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9114 info.nativeFormats |= RTAUDIO_SINT24;
9116 // Check that we have at least one supported format
9117 if ( info.nativeFormats == 0 ) {
9118 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9119 errorText_ = errorStream_.str();
9120 error( RtAudioError::WARNING );
9124 // Probe the supported sample rates.
9125 info.sampleRates.clear();
// If the driver enumerates discrete rates, intersect them with RtAudio's
// SAMPLE_RATES table; otherwise fall back to the min/max range check below.
9126 if ( ainfo.nrates ) {
9127 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9128 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9129 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9130 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate that does not exceed 48 kHz.
9132 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9133 info.preferredSampleRate = SAMPLE_RATES[k];
9141 // Check min and max rate values;
9142 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9143 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9144 info.sampleRates.push_back( SAMPLE_RATES[k] );
9146 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9147 info.preferredSampleRate = SAMPLE_RATES[k];
9152 if ( info.sampleRates.size() == 0 ) {
9153 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9154 errorText_ = errorStream_.str();
9155 error( RtAudioError::WARNING );
// Success path: record the device name ("info.probed = true" and the return
// statement are elided in this extract).
9159 info.name = ainfo.name;
// Open and configure one direction (OUTPUT or INPUT) of an OSS stream:
// validates the device, opens its devnode, negotiates channel count, sample
// format, fragment size and sample rate, allocates the user/device buffers,
// and on the first successful open spawns the callback thread. Returns
// true/SUCCESS on success; on any failure it falls through to the cleanup
// code at the end (the "error:" label and several goto/return lines are
// elided in this extract — internal numbering gaps).
9166 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9167 unsigned int firstChannel, unsigned int sampleRate,
9168 RtAudioFormat format, unsigned int *bufferSize,
9169 RtAudio::StreamOptions *options )
9171 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9172 if ( mixerfd == -1 ) {
9173 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9177 oss_sysinfo sysinfo;
9178 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9179 if ( result == -1 ) {
9181 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9185 unsigned nDevices = sysinfo.numaudios;
9186 if ( nDevices == 0 ) {
9187 // This should not happen because a check is made before this function is called.
9189 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9193 if ( device >= nDevices ) {
9194 // This should not happen because a check is made before this function is called.
9196 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9200 oss_audioinfo ainfo;
9202 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9204 if ( result == -1 ) {
// NOTE(review): this error message says "getDeviceInfo" but we are in
// probeDeviceOpen — runtime string, flagged here rather than changed.
9205 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9206 errorText_ = errorStream_.str();
9210 // Check if device supports input or output
9211 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9212 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9213 if ( mode == OUTPUT )
9214 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9216 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9217 errorText_ = errorStream_.str();
// Decide open flags: OSS requires a single fd opened O_RDWR for duplex on
// the same device, so a prior OUTPUT open is closed and reopened here.
9222 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9223 if ( mode == OUTPUT )
9225 else { // mode == INPUT
9226 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9227 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9228 close( handle->id[0] );
9230 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9231 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9232 errorText_ = errorStream_.str();
9235 // Check that the number previously set channels is the same.
9236 if ( stream_.nUserChannels[0] != channels ) {
9237 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9238 errorText_ = errorStream_.str();
9247 // Set exclusive access if specified.
9248 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9250 // Try to open the device.
9252 fd = open( ainfo.devnode, flags, 0 );
9254 if ( errno == EBUSY )
9255 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9257 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9258 errorText_ = errorStream_.str();
9262 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): "flags | O_RDWR" is bitwise-OR, which is always non-zero —
// the intended test is presumably "flags == O_RDWR" (or & ) — confirm
// against the canonical source before changing runtime behavior.
9264 if ( flags | O_RDWR ) {
9265 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9266 if ( result == -1) {
9267 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9268 errorText_ = errorStream_.str();
9274 // Check the device channel support.
9275 stream_.nUserChannels[mode] = channels;
9276 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9278 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9279 errorText_ = errorStream_.str();
9283 // Set the number of channels.
9284 int deviceChannels = channels + firstChannel;
9285 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9286 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9288 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9289 errorText_ = errorStream_.str();
9292 stream_.nDeviceChannels[mode] = deviceChannels;
9294 // Get the data format mask
9296 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9297 if ( result == -1 ) {
9299 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9300 errorText_ = errorStream_.str();
9304 // Determine how to set the device format.
// First try the user's requested format natively; _NE is native-endian,
// _OE is opposite-endian (requires byte swapping in the conversion path).
9305 stream_.userFormat = format;
9306 int deviceFormat = -1;
9307 stream_.doByteSwap[mode] = false;
9308 if ( format == RTAUDIO_SINT8 ) {
9309 if ( mask & AFMT_S8 ) {
9310 deviceFormat = AFMT_S8;
9311 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9314 else if ( format == RTAUDIO_SINT16 ) {
9315 if ( mask & AFMT_S16_NE ) {
9316 deviceFormat = AFMT_S16_NE;
9317 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9319 else if ( mask & AFMT_S16_OE ) {
9320 deviceFormat = AFMT_S16_OE;
9321 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9322 stream_.doByteSwap[mode] = true;
9325 else if ( format == RTAUDIO_SINT24 ) {
9326 if ( mask & AFMT_S24_NE ) {
9327 deviceFormat = AFMT_S24_NE;
9328 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9330 else if ( mask & AFMT_S24_OE ) {
9331 deviceFormat = AFMT_S24_OE;
9332 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9333 stream_.doByteSwap[mode] = true;
9336 else if ( format == RTAUDIO_SINT32 ) {
9337 if ( mask & AFMT_S32_NE ) {
9338 deviceFormat = AFMT_S32_NE;
9339 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9341 else if ( mask & AFMT_S32_OE ) {
9342 deviceFormat = AFMT_S32_OE;
9343 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9344 stream_.doByteSwap[mode] = true;
// Requested format unavailable: fall back to the best the device offers
// (preferring native-endian, widest useful formats first) and let RtAudio's
// buffer-conversion layer translate to/from the user format.
9348 if ( deviceFormat == -1 ) {
9349 // The user requested format is not natively supported by the device.
9350 if ( mask & AFMT_S16_NE ) {
9351 deviceFormat = AFMT_S16_NE;
9352 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9354 else if ( mask & AFMT_S32_NE ) {
9355 deviceFormat = AFMT_S32_NE;
9356 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9358 else if ( mask & AFMT_S24_NE ) {
9359 deviceFormat = AFMT_S24_NE;
9360 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9362 else if ( mask & AFMT_S16_OE ) {
9363 deviceFormat = AFMT_S16_OE;
9364 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9365 stream_.doByteSwap[mode] = true;
9367 else if ( mask & AFMT_S32_OE ) {
9368 deviceFormat = AFMT_S32_OE;
9369 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9370 stream_.doByteSwap[mode] = true;
9372 else if ( mask & AFMT_S24_OE ) {
9373 deviceFormat = AFMT_S24_OE;
9374 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9375 stream_.doByteSwap[mode] = true;
9377 else if ( mask & AFMT_S8) {
9378 deviceFormat = AFMT_S8;
9379 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9383 if ( stream_.deviceFormat[mode] == 0 ) {
9384 // This really shouldn't happen ...
9386 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9387 errorText_ = errorStream_.str();
9391 // Set the data format.
9392 int temp = deviceFormat;
9393 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
// SETFMT rewrites its argument with the format actually set; any deviation
// from what we asked for is treated as failure.
9394 if ( result == -1 || deviceFormat != temp ) {
9396 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9397 errorText_ = errorStream_.str();
9401 // Attempt to set the buffer size. According to OSS, the minimum
9402 // number of buffers is two. The supposed minimum buffer size is 16
9403 // bytes, so that will be our lower bound. The argument to this
9404 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9405 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9406 // We'll check the actual value used near the end of the setup
9408 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9409 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9411 if ( options ) buffers = options->numberOfBuffers;
9412 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9413 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(x): SSSS is the (truncated) power-of-two
// exponent of the fragment size in bytes.
9414 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9415 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9416 if ( result == -1 ) {
9418 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9419 errorText_ = errorStream_.str();
9422 stream_.nBuffers = buffers;
9424 // Save buffer size (in sample frames).
9425 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9426 stream_.bufferSize = *bufferSize;
9428 // Set the sample rate.
9429 int srate = sampleRate;
9430 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9431 if ( result == -1 ) {
9433 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9434 errorText_ = errorStream_.str();
9438 // Verify the sample rate setup worked.
// OSS may set a nearby rate; tolerate up to 100 Hz of deviation.
9439 if ( abs( srate - (int)sampleRate ) > 100 ) {
9441 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9442 errorText_ = errorStream_.str();
9445 stream_.sampleRate = sampleRate;
9447 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9448 // We're doing duplex setup here.
9449 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9450 stream_.nDeviceChannels[0] = deviceChannels;
9453 // Set interleaving parameters.
9454 stream_.userInterleaved = true;
9455 stream_.deviceInterleaved[mode] = true;
9456 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9457 stream_.userInterleaved = false;
9459 // Set flags for buffer conversion
9460 stream_.doConvertBuffer[mode] = false;
9461 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9462 stream_.doConvertBuffer[mode] = true;
9463 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9464 stream_.doConvertBuffer[mode] = true;
9465 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9466 stream_.nUserChannels[mode] > 1 )
9467 stream_.doConvertBuffer[mode] = true;
9469 // Allocate the stream handles if necessary and then save.
9470 if ( stream_.apiHandle == 0 ) {
9472 handle = new OssHandle;
9474 catch ( std::bad_alloc& ) {
9475 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9479 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9480 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9484 stream_.apiHandle = (void *) handle;
9487 handle = (OssHandle *) stream_.apiHandle;
9489 handle->id[mode] = fd;
9491 // Allocate necessary internal buffers.
9492 unsigned long bufferBytes;
9493 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9494 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9495 if ( stream_.userBuffer[mode] == NULL ) {
9496 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// The device-side buffer is shared between directions in duplex mode, so it
// is only (re)allocated when the existing one is too small.
9500 if ( stream_.doConvertBuffer[mode] ) {
9502 bool makeBuffer = true;
9503 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9504 if ( mode == INPUT ) {
9505 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9506 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9507 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9512 bufferBytes *= *bufferSize;
9513 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9514 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9515 if ( stream_.deviceBuffer == NULL ) {
9516 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9522 stream_.device[mode] = device;
9523 stream_.state = STREAM_STOPPED;
9525 // Setup the buffer conversion information structure.
9526 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9528 // Setup thread if necessary.
9529 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9530 // We had already set up an output stream.
9531 stream_.mode = DUPLEX;
9532 if ( stream_.device[0] == device ) handle->id[0] = fd;
9535 stream_.mode = mode;
9537 // Setup callback thread.
9538 stream_.callbackInfo.object = (void *) this;
9540 // Set the thread attributes for joinable and realtime scheduling
9541 // priority. The higher priority will only take effect if the
9542 // program is run as root or suid.
9543 pthread_attr_t attr;
9544 pthread_attr_init( &attr );
9545 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9546 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9547 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9548 stream_.callbackInfo.doRealtime = true;
9549 struct sched_param param;
// Clamp the requested priority into the valid SCHED_RR range.
9550 int priority = options->priority;
9551 int min = sched_get_priority_min( SCHED_RR );
9552 int max = sched_get_priority_max( SCHED_RR );
9553 if ( priority < min ) priority = min;
9554 else if ( priority > max ) priority = max;
9555 param.sched_priority = priority;
9557 // Set the policy BEFORE the priority. Otherwise it fails.
9558 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9559 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9560 // This is definitely required. Otherwise it fails.
9561 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" is mojibake for "&param" (the HTML entity "&para;"
// followed by "m") — must be restored to "&param" before this compiles.
9562 pthread_attr_setschedparam(&attr, ¶m);
9565 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9567 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9570 stream_.callbackInfo.isRunning = true;
9571 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9572 pthread_attr_destroy( &attr );
// Realtime attributes can be refused (e.g. without privileges); retry with
// default attributes before giving up.
9574 // Failed. Try instead with default attributes.
9575 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9577 stream_.callbackInfo.isRunning = false;
9578 errorText_ = "RtApiOss::error creating callback thread!";
// Error-path cleanup ("error:" label elided): tear down the handle, fds,
// buffers and mark the stream closed.
9588 pthread_cond_destroy( &handle->runnable );
9589 if ( handle->id[0] ) close( handle->id[0] );
9590 if ( handle->id[1] ) close( handle->id[1] );
9592 stream_.apiHandle = 0;
9595 for ( int i=0; i<2; i++ ) {
9596 if ( stream_.userBuffer[i] ) {
9597 free( stream_.userBuffer[i] );
9598 stream_.userBuffer[i] = 0;
9602 if ( stream_.deviceBuffer ) {
9603 free( stream_.deviceBuffer );
9604 stream_.deviceBuffer = 0;
9607 stream_.state = STREAM_CLOSED;
// Close the open stream: wake and join the callback thread, halt any running
// device direction, destroy the handle and close the device fds, free the
// user/device buffers, and reset the stream to UNINITIALIZED/CLOSED.
9611 void RtApiOss :: closeStream()
9613 if ( stream_.state == STREAM_CLOSED ) {
9614 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9615 error( RtAudioError::WARNING );
9619 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Clearing isRunning makes the callback loop exit; if the thread is parked
// on the condition variable (stream stopped), signal it so join can succeed.
9620 stream_.callbackInfo.isRunning = false;
9621 MUTEX_LOCK( &stream_.mutex );
9622 if ( stream_.state == STREAM_STOPPED )
9623 pthread_cond_signal( &handle->runnable );
9624 MUTEX_UNLOCK( &stream_.mutex );
9625 pthread_join( stream_.callbackInfo.thread, NULL );
9627 if ( stream_.state == STREAM_RUNNING ) {
9628 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9629 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9631 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9632 stream_.state = STREAM_STOPPED;
9636 pthread_cond_destroy( &handle->runnable );
9637 if ( handle->id[0] ) close( handle->id[0] );
9638 if ( handle->id[1] ) close( handle->id[1] );
// NOTE(review): a "delete handle;" line appears to be elided here (internal
// numbering gap) — confirm the handle is freed in the full source.
9640 stream_.apiHandle = 0;
9643 for ( int i=0; i<2; i++ ) {
9644 if ( stream_.userBuffer[i] ) {
9645 free( stream_.userBuffer[i] );
9646 stream_.userBuffer[i] = 0;
9650 if ( stream_.deviceBuffer ) {
9651 free( stream_.deviceBuffer );
9652 stream_.deviceBuffer = 0;
9655 stream_.mode = UNINITIALIZED;
9656 stream_.state = STREAM_CLOSED;
// Start a stopped stream. OSS begins playing/recording as soon as samples
// are written/read, so this only flips the state to RUNNING (under the
// stream mutex) and wakes the callback thread parked in callbackEvent().
9659 void RtApiOss :: startStream()
9662 if ( stream_.state == STREAM_RUNNING ) {
9663 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9664 error( RtAudioError::WARNING );
9668 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference so getStreamTime() restarts from "now".
9670 #if defined( HAVE_GETTIMEOFDAY )
9671 gettimeofday( &stream_.lastTickTimestamp, NULL );
9674 stream_.state = STREAM_RUNNING;
9676 // No need to do anything else here ... OSS automatically starts
9677 // when fed samples.
9679 MUTEX_UNLOCK( &stream_.mutex );
9681 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9682 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: flush the output with silence so queued audio
// drains (avoiding a click/truncation), then HALT each open direction and
// mark the stream STOPPED. On an ioctl failure, falls through to raise a
// SYSTEM_ERROR after unlocking.
9685 void RtApiOss :: stopStream()
9688 if ( stream_.state == STREAM_STOPPED ) {
9689 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9690 error( RtAudioError::WARNING );
9694 MUTEX_LOCK( &stream_.mutex );
9696 // The state might change while waiting on a mutex.
9697 if ( stream_.state == STREAM_STOPPED ) {
9698 MUTEX_UNLOCK( &stream_.mutex );
9703 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9704 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9706 // Flush the output with zeros a few times.
// Pick whichever buffer actually feeds the device (converted device buffer
// when format/channel conversion is active, raw user buffer otherwise).
9709 RtAudioFormat format;
9711 if ( stream_.doConvertBuffer[0] ) {
9712 buffer = stream_.deviceBuffer;
9713 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9714 format = stream_.deviceFormat[0];
9717 buffer = stream_.userBuffer[0];
9718 samples = stream_.bufferSize * stream_.nUserChannels[0];
9719 format = stream_.userFormat;
// Write one buffer of silence per device fragment (nBuffers+1) to drain.
9722 memset( buffer, 0, samples * formatBytes(format) );
9723 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9724 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9725 if ( result == -1 ) {
9726 errorText_ = "RtApiOss::stopStream: audio write error.";
9727 error( RtAudioError::WARNING );
9731 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9732 if ( result == -1 ) {
9733 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9734 errorText_ = errorStream_.str();
// Require the duplex trigger handshake again on the next start.
9737 handle->triggered = false;
// Halt the input fd too, unless duplex shares a single fd (already halted).
9740 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9741 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9742 if ( result == -1 ) {
9743 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9744 errorText_ = errorStream_.str();
9750 stream_.state = STREAM_STOPPED;
9751 MUTEX_UNLOCK( &stream_.mutex );
9753 if ( result != -1 ) return;
9754 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately: like stopStream() but without flushing the
// output with silence first — queued audio is discarded via SNDCTL_DSP_HALT.
9757 void RtApiOss :: abortStream()
9760 if ( stream_.state == STREAM_STOPPED ) {
9761 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9762 error( RtAudioError::WARNING );
9766 MUTEX_LOCK( &stream_.mutex );
9768 // The state might change while waiting on a mutex.
9769 if ( stream_.state == STREAM_STOPPED ) {
9770 MUTEX_UNLOCK( &stream_.mutex );
9775 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9776 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9777 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9778 if ( result == -1 ) {
9779 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9780 errorText_ = errorStream_.str();
// Require the duplex trigger handshake again on the next start.
9783 handle->triggered = false;
// Halt the input fd too, unless duplex shares a single fd (already halted).
9786 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9787 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9788 if ( result == -1 ) {
9789 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9790 errorText_ = errorStream_.str();
9796 stream_.state = STREAM_STOPPED;
9797 MUTEX_UNLOCK( &stream_.mutex );
9799 if ( result != -1 ) return;
9800 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread: park on the condition variable while
// stopped, invoke the user callback to produce/consume one buffer, then do
// format conversion + byte swapping and the blocking write()/read() against
// the device fds. A callback return of 2 aborts, 1 stops after this cycle.
9803 void RtApiOss :: callbackEvent()
9805 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Stopped: sleep until startStream()/closeStream() signals; re-check state
// because closeStream() also signals with state still STOPPED.
9806 if ( stream_.state == STREAM_STOPPED ) {
9807 MUTEX_LOCK( &stream_.mutex );
9808 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9809 if ( stream_.state != STREAM_RUNNING ) {
9810 MUTEX_UNLOCK( &stream_.mutex );
9813 MUTEX_UNLOCK( &stream_.mutex );
9816 if ( stream_.state == STREAM_CLOSED ) {
9817 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9818 error( RtAudioError::WARNING );
9822 // Invoke user callback to get fresh output data.
9823 int doStopStream = 0;
9824 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9825 double streamTime = getStreamTime();
// Report (and clear) any under/overflow flags recorded by the I/O code below
// on a previous cycle.
9826 RtAudioStreamStatus status = 0;
9827 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9828 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9829 handle->xrun[0] = false;
9831 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9832 status |= RTAUDIO_INPUT_OVERFLOW;
9833 handle->xrun[1] = false;
9835 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9836 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9837 if ( doStopStream == 2 ) {
9838 this->abortStream();
9842 MUTEX_LOCK( &stream_.mutex );
9844 // The state might change while waiting on a mutex.
9845 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9850 RtAudioFormat format;
9852 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9854 // Setup parameters and do buffer conversion if necessary.
9855 if ( stream_.doConvertBuffer[0] ) {
9856 buffer = stream_.deviceBuffer;
9857 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9858 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9859 format = stream_.deviceFormat[0];
9862 buffer = stream_.userBuffer[0];
9863 samples = stream_.bufferSize * stream_.nUserChannels[0];
9864 format = stream_.userFormat;
9867 // Do byte swapping if necessary.
9868 if ( stream_.doByteSwap[0] )
9869 byteSwapBuffer( buffer, samples, format );
// First duplex cycle: prime the device with one buffer while triggers are
// off, then enable input+output triggers atomically so both directions
// start in sync (SNDCTL_DSP_SETTRIGGER).
9871 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9873 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9874 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9875 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9876 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9877 handle->triggered = true;
9880 // Write samples to device.
9881 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9883 if ( result == -1 ) {
9884 // We'll assume this is an underrun, though there isn't a
9885 // specific means for determining that.
9886 handle->xrun[0] = true;
9887 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9888 error( RtAudioError::WARNING );
9889 // Continue on to input section.
9893 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9895 // Setup parameters.
9896 if ( stream_.doConvertBuffer[1] ) {
9897 buffer = stream_.deviceBuffer;
9898 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9899 format = stream_.deviceFormat[1];
9902 buffer = stream_.userBuffer[1];
9903 samples = stream_.bufferSize * stream_.nUserChannels[1];
9904 format = stream_.userFormat;
9907 // Read samples from device.
9908 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9910 if ( result == -1 ) {
9911 // We'll assume this is an overrun, though there isn't a
9912 // specific means for determining that.
9913 handle->xrun[1] = true;
9914 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9915 error( RtAudioError::WARNING );
9919 // Do byte swapping if necessary.
9920 if ( stream_.doByteSwap[1] )
9921 byteSwapBuffer( buffer, samples, format );
9923 // Do buffer conversion if necessary.
9924 if ( stream_.doConvertBuffer[1] )
9925 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// unlock: label elided in this extract; advance stream time and honor a
// deferred stop request from the callback.
9929 MUTEX_UNLOCK( &stream_.mutex );
9931 RtApi::tickStreamTime();
9932 if ( doStopStream == 1 ) this->stopStream();
// Callback thread entry point: loops calling RtApiOss::callbackEvent() until
// closeStream() clears callbackInfo.isRunning. The CallbackInfo pointer
// passed at pthread_create time carries the owning RtApiOss object.
9935 static void *ossCallbackHandler( void *ptr )
9937 CallbackInfo *info = (CallbackInfo *) ptr;
9938 RtApiOss *object = (RtApiOss *) info->object;
9939 bool *isRunning = &info->isRunning;
9941 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether the realtime policy requested in
// probeDeviceOpen actually took effect for this thread.
9942 if (info->doRealtime) {
9943 std::cerr << "RtAudio oss: " <<
9944 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9945 "running realtime scheduling" << std::endl;
9949 while ( *isRunning == true ) {
// pthread_testcancel() provides a cancellation point each cycle since
// callbackEvent() may block on device I/O or the condition variable.
9950 pthread_testcancel();
9951 object->callbackEvent();
9954 pthread_exit( NULL );
9957 //******************** End of __LINUX_OSS__ *********************//
9961 // *************************************************** //
9963 // Protected common (OS-independent) RtAudio methods.
9965 // *************************************************** //
9967 // This method can be modified to control the behavior of error
9968 // message printing.
9969 void RtApi :: error( RtAudioError::Type type )
9971 errorStream_.str(""); // clear the ostringstream
9973 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9974 if ( errorCallback ) {
9975 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9977 if ( firstErrorOccurred_ )
9980 firstErrorOccurred_ = true;
9981 const std::string errorMessage = errorText_;
9983 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9984 stream_.callbackInfo.isRunning = false; // exit from the thread
9988 errorCallback( type, errorMessage );
9989 firstErrorOccurred_ = false;
9993 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9994 std::cerr << '\n' << errorText_ << "\n\n";
9995 else if ( type != RtAudioError::WARNING )
9996 throw( RtAudioError( errorText_, type ) );
9999 void RtApi :: verifyStream()
10001 if ( stream_.state == STREAM_CLOSED ) {
10002 errorText_ = "RtApi:: a stream is not open!";
10003 error( RtAudioError::INVALID_USE );
10007 void RtApi :: clearStreamInfo()
10009 stream_.mode = UNINITIALIZED;
10010 stream_.state = STREAM_CLOSED;
10011 stream_.sampleRate = 0;
10012 stream_.bufferSize = 0;
10013 stream_.nBuffers = 0;
10014 stream_.userFormat = 0;
10015 stream_.userInterleaved = true;
10016 stream_.streamTime = 0.0;
10017 stream_.apiHandle = 0;
10018 stream_.deviceBuffer = 0;
10019 stream_.callbackInfo.callback = 0;
10020 stream_.callbackInfo.userData = 0;
10021 stream_.callbackInfo.isRunning = false;
10022 stream_.callbackInfo.errorCallback = 0;
10023 for ( int i=0; i<2; i++ ) {
10024 stream_.device[i] = 11111;
10025 stream_.doConvertBuffer[i] = false;
10026 stream_.deviceInterleaved[i] = true;
10027 stream_.doByteSwap[i] = false;
10028 stream_.nUserChannels[i] = 0;
10029 stream_.nDeviceChannels[i] = 0;
10030 stream_.channelOffset[i] = 0;
10031 stream_.deviceFormat[i] = 0;
10032 stream_.latency[i] = 0;
10033 stream_.userBuffer[i] = 0;
10034 stream_.convertInfo[i].channels = 0;
10035 stream_.convertInfo[i].inJump = 0;
10036 stream_.convertInfo[i].outJump = 0;
10037 stream_.convertInfo[i].inFormat = 0;
10038 stream_.convertInfo[i].outFormat = 0;
10039 stream_.convertInfo[i].inOffset.clear();
10040 stream_.convertInfo[i].outOffset.clear();
10044 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10046 if ( format == RTAUDIO_SINT16 )
10048 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10050 else if ( format == RTAUDIO_FLOAT64 )
10052 else if ( format == RTAUDIO_SINT24 )
10054 else if ( format == RTAUDIO_SINT8 )
10057 errorText_ = "RtApi::formatBytes: undefined format.";
10058 error( RtAudioError::WARNING );
// Populate stream_.convertInfo[mode] for use by convertBuffer():
//  - inJump/outJump: per-frame strides of the source/destination buffers,
//  - channels: number of channels actually copied (the smaller side),
//  - inOffset/outOffset: per-channel element offsets implementing
//    interleave <-> deinterleave and the firstChannel shift.
// For INPUT the conversion is device buffer -> user buffer; otherwise
// user buffer -> device buffer.
void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
{
  if ( mode == INPUT ) { // convert device to user buffer
    stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
    stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
    stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
    stream_.convertInfo[mode].outFormat = stream_.userFormat;
  }
  else { // convert user to device buffer
    stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
    stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
    stream_.convertInfo[mode].inFormat = stream_.userFormat;
    stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
  }

  // Copy only as many channels as both sides can hold.
  if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
  else
    stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;

  // Set up the interleave/deinterleave offsets.
  if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
    if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
         ( mode == INPUT && stream_.userInterleaved ) ) {
      // Non-interleaved source -> interleaved destination: source channels
      // are contiguous planes of bufferSize samples, so inJump becomes 1.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k );
        stream_.convertInfo[mode].inJump = 1;
      }
    }
    else {
      // Interleaved source -> non-interleaved destination (mirror case).
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }
  else { // no (de)interleaving
    if ( stream_.userInterleaved ) {
      // Both interleaved: channel k sits k elements into each frame.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k );
        stream_.convertInfo[mode].outOffset.push_back( k );
      }
    }
    else {
      // Both planar: channel k is a whole bufferSize-sample plane away;
      // per-frame stride collapses to 1 on both sides.
      for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
        stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
        stream_.convertInfo[mode].inJump = 1;
        stream_.convertInfo[mode].outJump = 1;
      }
    }
  }

  // Add channel offset.
  // The shift is applied to the device-facing side only: out offsets for
  // OUTPUT, in offsets for INPUT.  Interleaved devices shift by channels,
  // planar devices by whole bufferSize planes.
  if ( firstChannel > 0 ) {
    if ( stream_.deviceInterleaved[mode] ) {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += firstChannel;
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += firstChannel;
      }
    }
    else {
      if ( mode == OUTPUT ) {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
      }
      else {
        for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
          stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
      }
    }
  }
}
// Convert bufferSize frames from inBuffer to outBuffer according to `info`
// (built by setConvertInfo): sample-format conversion, channel-count
// compensation, and interleave/deinterleave, all in one pass.  The code is a
// 6x6 matrix of (outFormat x inFormat) branches sharing the same loop shape:
// for each frame, copy/convert info.channels samples through the per-channel
// offset tables, then advance both pointers by their per-frame jumps.
void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
{
  // This function does format conversion, input/output channel compensation, and
  // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
  // the lower three bytes of a 32-bit integer.

  // Clear our device buffer when in/out duplex device channels are different
  // (otherwise channels beyond the copied range would keep stale data).
  if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
       ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
    memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );

  int j;
  if (info.outFormat == RTAUDIO_FLOAT64) {
    Float64 scale;
    Float64 *out = (Float64 *)outBuffer;

    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      // Map [-128,127] onto [-1,1): add 0.5 to center, scale by 1/127.5.
      scale = 1.0 / 127.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      scale = 1.0 / 32767.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      scale = 1.0 / 8388607.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      scale = 1.0 / 2147483647.5;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      // Channel compensation and/or (de)interleaving only.
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
  }
  else if (info.outFormat == RTAUDIO_FLOAT32) {
    Float32 scale;
    Float32 *out = (Float32 *)outBuffer;

    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      scale = (Float32) ( 1.0 / 127.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      scale = (Float32) ( 1.0 / 32767.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      scale = (Float32) ( 1.0 / 8388607.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      scale = (Float32) ( 1.0 / 2147483647.5 );
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
          out[info.outOffset[j]] += 0.5;
          out[info.outOffset[j]] *= scale;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      // Channel compensation and/or (de)interleaving only.
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
  }
  else if (info.outFormat == RTAUDIO_SINT32) {
    Int32 *out = (Int32 *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          // Widen by shifting into the high byte.
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
          out[info.outOffset[j]] <<= 24;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
          out[info.outOffset[j]] <<= 16;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
          out[info.outOffset[j]] <<= 8;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT32) {
      // Channel compensation and/or (de)interleaving only.
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          // NOTE(review): a full-scale input of exactly +1.0f yields
          // 2147483647.0 before truncation, at the edge of Int32 range --
          // inputs above +1.0 would overflow; confirm inputs are clamped
          // upstream.
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
  }
  else if (info.outFormat == RTAUDIO_SINT24) {
    Int24 *out = (Int24 *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          // Shift before assignment: Int24 stores the low 3 bytes of the Int32.
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
          //out[info.outOffset[j]] <<= 16;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
          //out[info.outOffset[j]] <<= 8;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT24) {
      // Channel compensation and/or (de)interleaving only.
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          // Drop the least-significant byte.
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
          //out[info.outOffset[j]] >>= 8;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
  }
  else if (info.outFormat == RTAUDIO_SINT16) {
    Int16 *out = (Int16 *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
          out[info.outOffset[j]] <<= 8;
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT16) {
      // Channel compensation and/or (de)interleaving only.
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
  }
  else if (info.outFormat == RTAUDIO_SINT8) {
    signed char *out = (signed char *)outBuffer;
    if (info.inFormat == RTAUDIO_SINT8) {
      // Channel compensation and/or (de)interleaving only.
      signed char *in = (signed char *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = in[info.inOffset[j]];
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    // NOTE(review): plain `if` here where every sibling branch uses
    // `else if`.  Harmless (the SINT8 case above excludes SINT16), but
    // inconsistent with the rest of the matrix.
    if (info.inFormat == RTAUDIO_SINT16) {
      Int16 *in = (Int16 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT24) {
      Int24 *in = (Int24 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_SINT32) {
      Int32 *in = (Int32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT32) {
      Float32 *in = (Float32 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
    else if (info.inFormat == RTAUDIO_FLOAT64) {
      Float64 *in = (Float64 *)inBuffer;
      for (unsigned int i=0; i<stream_.bufferSize; i++) {
        for (j=0; j<info.channels; j++) {
          out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
        }
        in += info.inJump;
        out += info.outJump;
      }
    }
  }
}
10576 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10577 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10578 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10580 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10586 if ( format == RTAUDIO_SINT16 ) {
10587 for ( unsigned int i=0; i<samples; i++ ) {
10588 // Swap 1st and 2nd bytes.
10593 // Increment 2 bytes.
10597 else if ( format == RTAUDIO_SINT32 ||
10598 format == RTAUDIO_FLOAT32 ) {
10599 for ( unsigned int i=0; i<samples; i++ ) {
10600 // Swap 1st and 4th bytes.
10605 // Swap 2nd and 3rd bytes.
10611 // Increment 3 more bytes.
10615 else if ( format == RTAUDIO_SINT24 ) {
10616 for ( unsigned int i=0; i<samples; i++ ) {
10617 // Swap 1st and 3rd bytes.
10622 // Increment 2 more bytes.
10626 else if ( format == RTAUDIO_FLOAT64 ) {
10627 for ( unsigned int i=0; i<samples; i++ ) {
10628 // Swap 1st and 8th bytes
10633 // Swap 2nd and 7th bytes
10639 // Swap 3rd and 6th bytes
10645 // Swap 4th and 5th bytes
10651 // Increment 5 more bytes.
10657 // Indentation settings for Vim and Emacs
10659 // Local Variables:
10660 // c-basic-offset: 2
10661 // indent-tabs-mode: nil
10664 // vim: et sts=2 sw=2