1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-dependent mutex wrappers and (Windows only) helpers to
// convert device-name strings to std::string.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)

#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
#define MUTEX_LOCK(A)       EnterCriticalSection(A)
#define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

// Identity overload so the same call site handles narrow strings.
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}

// Convert a null-terminated wide (UTF-16) string to a UTF-8 std::string.
static std::string convertCharPointerToStdString(const wchar_t *text)
{
  // First call computes the required buffer size (includes the terminator).
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  std::string s( length-1, '\0' );  // length-1: exclude the null terminator
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
  return s;
}

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)

// pthread API
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
#define MUTEX_LOCK(A)       pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)

#else // Setup for "dummy" behaviour

#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A)    abs(*A) // dummy definitions

#endif
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum (UNSPECIFIED, LINUX_ALSA,
// LINUX_PULSE, LINUX_OSS, UNIX_JACK, MACOSX_CORE, WINDOWS_WASAPI,
// WINDOWS_ASIO, WINDOWS_DS, RTAUDIO_DUMMY) because getApiName /
// getApiDisplayName index this table directly by enum value.
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa"        , "ALSA" },
  { "pulse"       , "Pulse" },
  { "oss"         , "OpenSoundSystem" },
  { "jack"        , "Jack" },
  { "core"        , "CoreAudio" },
  { "wasapi"      , "WASAPI" },
  { "asio"        , "ASIO" },
  { "ds"          , "DirectSound" },
  { "dummy"       , "Dummy" },
};
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
119 // The order here will control the order of RtAudio's API search in
121 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
122 #if defined(__UNIX_JACK__)
125 #if defined(__LINUX_PULSE__)
126 RtAudio::LINUX_PULSE,
128 #if defined(__LINUX_ALSA__)
131 #if defined(__LINUX_OSS__)
134 #if defined(__WINDOWS_ASIO__)
135 RtAudio::WINDOWS_ASIO,
137 #if defined(__WINDOWS_WASAPI__)
138 RtAudio::WINDOWS_WASAPI,
140 #if defined(__WINDOWS_DS__)
143 #if defined(__MACOSX_CORE__)
144 RtAudio::MACOSX_CORE,
146 #if defined(__RTAUDIO_DUMMY__)
147 RtAudio::RTAUDIO_DUMMY,
149 RtAudio::UNSPECIFIED,
151 extern "C" const unsigned int rtaudio_num_compiled_apis =
152 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
155 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
156 // If the build breaks here, check that they match.
157 template<bool b> class StaticAssert { private: StaticAssert() {} };
158 template<> class StaticAssert<true>{ public: StaticAssert() {} };
159 class StaticAssertions { StaticAssertions() {
160 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
163 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
166 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
169 std::string RtAudio :: getApiName( RtAudio::Api api )
171 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return rtaudio_api_names[api][0];
176 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return rtaudio_api_names[api][1];
183 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
186 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
187 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
188 return rtaudio_compiled_apis[i];
189 return RtAudio::UNSPECIFIED;
192 void RtAudio :: openRtApi( RtAudio::Api api )
198 #if defined(__UNIX_JACK__)
199 if ( api == UNIX_JACK )
200 rtapi_ = new RtApiJack();
202 #if defined(__LINUX_ALSA__)
203 if ( api == LINUX_ALSA )
204 rtapi_ = new RtApiAlsa();
206 #if defined(__LINUX_PULSE__)
207 if ( api == LINUX_PULSE )
208 rtapi_ = new RtApiPulse();
210 #if defined(__LINUX_OSS__)
211 if ( api == LINUX_OSS )
212 rtapi_ = new RtApiOss();
214 #if defined(__WINDOWS_ASIO__)
215 if ( api == WINDOWS_ASIO )
216 rtapi_ = new RtApiAsio();
218 #if defined(__WINDOWS_WASAPI__)
219 if ( api == WINDOWS_WASAPI )
220 rtapi_ = new RtApiWasapi();
222 #if defined(__WINDOWS_DS__)
223 if ( api == WINDOWS_DS )
224 rtapi_ = new RtApiDs();
226 #if defined(__MACOSX_CORE__)
227 if ( api == MACOSX_CORE )
228 rtapi_ = new RtApiCore();
230 #if defined(__RTAUDIO_DUMMY__)
231 if ( api == RTAUDIO_DUMMY )
232 rtapi_ = new RtApiDummy();
236 RtAudio :: RtAudio( RtAudio::Api api )
240 if ( api != UNSPECIFIED ) {
241 // Attempt to open the specified API.
243 if ( rtapi_ ) return;
245 // No compiled support for specified API value. Issue a debug
246 // warning and continue as if no API was specified.
247 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
250 // Iterate through the compiled APIs and return as soon as we find
251 // one with at least one device or we reach the end of the list.
252 std::vector< RtAudio::Api > apis;
253 getCompiledApi( apis );
254 for ( unsigned int i=0; i<apis.size(); i++ ) {
255 openRtApi( apis[i] );
256 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
259 if ( rtapi_ ) return;
261 // It should not be possible to get here because the preprocessor
262 // definition __RTAUDIO_DUMMY__ is automatically defined if no
263 // API-specific definitions are passed to the compiler. But just in
264 // case something weird happens, we'll thow an error.
265 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
266 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
269 RtAudio :: ~RtAudio()
275 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
276 RtAudio::StreamParameters *inputParameters,
277 RtAudioFormat format, unsigned int sampleRate,
278 unsigned int *bufferFrames,
279 RtAudioCallback callback, void *userData,
280 RtAudio::StreamOptions *options,
281 RtAudioErrorCallback errorCallback )
283 return rtapi_->openStream( outputParameters, inputParameters, format,
284 sampleRate, bufferFrames, callback,
285 userData, options, errorCallback );
288 // *************************************************** //
290 // Public RtApi definitions (see end of file for
291 // private or protected utility functions).
293 // *************************************************** //
297 stream_.state = STREAM_CLOSED;
298 stream_.mode = UNINITIALIZED;
299 stream_.apiHandle = 0;
300 stream_.userBuffer[0] = 0;
301 stream_.userBuffer[1] = 0;
302 MUTEX_INITIALIZE( &stream_.mutex );
303 showWarnings_ = true;
304 firstErrorOccurred_ = false;
309 MUTEX_DESTROY( &stream_.mutex );
312 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
313 RtAudio::StreamParameters *iParams,
314 RtAudioFormat format, unsigned int sampleRate,
315 unsigned int *bufferFrames,
316 RtAudioCallback callback, void *userData,
317 RtAudio::StreamOptions *options,
318 RtAudioErrorCallback errorCallback )
320 if ( stream_.state != STREAM_CLOSED ) {
321 errorText_ = "RtApi::openStream: a stream is already open!";
322 error( RtAudioError::INVALID_USE );
326 // Clear stream information potentially left from a previously open stream.
329 if ( oParams && oParams->nChannels < 1 ) {
330 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
331 error( RtAudioError::INVALID_USE );
335 if ( iParams && iParams->nChannels < 1 ) {
336 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
337 error( RtAudioError::INVALID_USE );
341 if ( oParams == NULL && iParams == NULL ) {
342 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
343 error( RtAudioError::INVALID_USE );
347 if ( formatBytes(format) == 0 ) {
348 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
349 error( RtAudioError::INVALID_USE );
353 unsigned int nDevices = getDeviceCount();
354 unsigned int oChannels = 0;
356 oChannels = oParams->nChannels;
357 if ( oParams->deviceId >= nDevices ) {
358 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
359 error( RtAudioError::INVALID_USE );
364 unsigned int iChannels = 0;
366 iChannels = iParams->nChannels;
367 if ( iParams->deviceId >= nDevices ) {
368 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
369 error( RtAudioError::INVALID_USE );
376 if ( oChannels > 0 ) {
378 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
379 sampleRate, format, bufferFrames, options );
380 if ( result == false ) {
381 error( RtAudioError::SYSTEM_ERROR );
386 if ( iChannels > 0 ) {
388 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
389 sampleRate, format, bufferFrames, options );
390 if ( result == false ) {
391 if ( oChannels > 0 ) closeStream();
392 error( RtAudioError::SYSTEM_ERROR );
397 stream_.callbackInfo.callback = (void *) callback;
398 stream_.callbackInfo.userData = userData;
399 stream_.callbackInfo.errorCallback = (void *) errorCallback;
401 if ( options ) options->numberOfBuffers = stream_.nBuffers;
402 stream_.state = STREAM_STOPPED;
405 unsigned int RtApi :: getDefaultInputDevice( void )
407 // Should be implemented in subclasses if possible.
411 unsigned int RtApi :: getDefaultOutputDevice( void )
413 // Should be implemented in subclasses if possible.
417 void RtApi :: closeStream( void )
419 // MUST be implemented in subclasses!
423 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
424 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
425 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
426 RtAudio::StreamOptions * /*options*/ )
428 // MUST be implemented in subclasses!
432 void RtApi :: tickStreamTime( void )
434 // Subclasses that do not provide their own implementation of
435 // getStreamTime should call this function once per buffer I/O to
436 // provide basic stream time support.
438 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
440 #if defined( HAVE_GETTIMEOFDAY )
441 gettimeofday( &stream_.lastTickTimestamp, NULL );
445 long RtApi :: getStreamLatency( void )
449 long totalLatency = 0;
450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
451 totalLatency = stream_.latency[0];
452 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
453 totalLatency += stream_.latency[1];
458 double RtApi :: getStreamTime( void )
462 #if defined( HAVE_GETTIMEOFDAY )
463 // Return a very accurate estimate of the stream time by
464 // adding in the elapsed time since the last tick.
468 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
469 return stream_.streamTime;
471 gettimeofday( &now, NULL );
472 then = stream_.lastTickTimestamp;
473 return stream_.streamTime +
474 ((now.tv_sec + 0.000001 * now.tv_usec) -
475 (then.tv_sec + 0.000001 * then.tv_usec));
477 return stream_.streamTime;
481 void RtApi :: setStreamTime( double time )
486 stream_.streamTime = time;
487 #if defined( HAVE_GETTIMEOFDAY )
488 gettimeofday( &stream_.lastTickTimestamp, NULL );
492 unsigned int RtApi :: getStreamSampleRate( void )
496 return stream_.sampleRate;
500 // *************************************************** //
502 // OS/API-specific methods.
504 // *************************************************** //
506 #if defined(__MACOSX_CORE__)
508 // The OS X CoreAudio API is designed to use a separate callback
509 // procedure for each of its audio devices. A single RtAudio duplex
510 // stream using two different devices is supported here, though it
511 // cannot be guaranteed to always behave correctly because we cannot
512 // synchronize these two callbacks.
514 // A property listener is installed for over/underrun information.
515 // However, no functionality is currently provided to allow property
516 // listeners to trigger user handlers because it is unclear what could
517 // be done if a critical stream parameter (buffer size, sample rate,
518 // device disconnect) notification arrived. The listeners entail
519 // quite a bit of extra code and most likely, a user program wouldn't
520 // be prepared for the result anyway. However, we do provide a flag
521 // to the client callback function to inform of an over/underrun.
523 // A structure to hold various information related to the CoreAudio API
526 AudioDeviceID id[2]; // device ids
527 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
528 AudioDeviceIOProcID procId[2];
530 UInt32 iStream[2]; // device stream index (or first if using multiple)
531 UInt32 nStreams[2]; // number of streams to use
534 pthread_cond_t condition;
535 int drainCounter; // Tracks callback counts when draining
536 bool internalDrain; // Indicates if stop is initiated from callback or not.
539 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
542 RtApiCore:: RtApiCore()
544 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
545 // This is a largely undocumented but absolutely necessary
546 // requirement starting with OS-X 10.6. If not called, queries and
547 // updates to various audio device properties are not handled
549 CFRunLoopRef theRunLoop = NULL;
550 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
551 kAudioObjectPropertyScopeGlobal,
552 kAudioObjectPropertyElementMaster };
553 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
554 if ( result != noErr ) {
555 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
556 error( RtAudioError::WARNING );
561 RtApiCore :: ~RtApiCore()
563 // The subclass destructor gets called before the base class
564 // destructor, so close an existing stream before deallocating
565 // apiDeviceId memory.
566 if ( stream_.state != STREAM_CLOSED ) closeStream();
569 unsigned int RtApiCore :: getDeviceCount( void )
571 // Find out how many audio devices there are, if any.
573 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
574 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
575 if ( result != noErr ) {
576 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
577 error( RtAudioError::WARNING );
581 return dataSize / sizeof( AudioDeviceID );
584 unsigned int RtApiCore :: getDefaultInputDevice( void )
586 unsigned int nDevices = getDeviceCount();
587 if ( nDevices <= 1 ) return 0;
590 UInt32 dataSize = sizeof( AudioDeviceID );
591 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
592 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
593 if ( result != noErr ) {
594 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
595 error( RtAudioError::WARNING );
599 dataSize *= nDevices;
600 AudioDeviceID deviceList[ nDevices ];
601 property.mSelector = kAudioHardwarePropertyDevices;
602 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
603 if ( result != noErr ) {
604 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
605 error( RtAudioError::WARNING );
609 for ( unsigned int i=0; i<nDevices; i++ )
610 if ( id == deviceList[i] ) return i;
612 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
613 error( RtAudioError::WARNING );
617 unsigned int RtApiCore :: getDefaultOutputDevice( void )
619 unsigned int nDevices = getDeviceCount();
620 if ( nDevices <= 1 ) return 0;
623 UInt32 dataSize = sizeof( AudioDeviceID );
624 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
625 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
626 if ( result != noErr ) {
627 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
628 error( RtAudioError::WARNING );
632 dataSize = sizeof( AudioDeviceID ) * nDevices;
633 AudioDeviceID deviceList[ nDevices ];
634 property.mSelector = kAudioHardwarePropertyDevices;
635 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
636 if ( result != noErr ) {
637 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
638 error( RtAudioError::WARNING );
642 for ( unsigned int i=0; i<nDevices; i++ )
643 if ( id == deviceList[i] ) return i;
645 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
646 error( RtAudioError::WARNING );
650 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
652 RtAudio::DeviceInfo info;
656 unsigned int nDevices = getDeviceCount();
657 if ( nDevices == 0 ) {
658 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
659 error( RtAudioError::INVALID_USE );
663 if ( device >= nDevices ) {
664 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
665 error( RtAudioError::INVALID_USE );
669 AudioDeviceID deviceList[ nDevices ];
670 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
671 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
672 kAudioObjectPropertyScopeGlobal,
673 kAudioObjectPropertyElementMaster };
674 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
675 0, NULL, &dataSize, (void *) &deviceList );
676 if ( result != noErr ) {
677 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
678 error( RtAudioError::WARNING );
682 AudioDeviceID id = deviceList[ device ];
684 // Get the device name.
687 dataSize = sizeof( CFStringRef );
688 property.mSelector = kAudioObjectPropertyManufacturer;
689 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
690 if ( result != noErr ) {
691 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
692 errorText_ = errorStream_.str();
693 error( RtAudioError::WARNING );
697 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
698 int length = CFStringGetLength(cfname);
699 char *mname = (char *)malloc(length * 3 + 1);
700 #if defined( UNICODE ) || defined( _UNICODE )
701 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
703 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
705 info.name.append( (const char *)mname, strlen(mname) );
706 info.name.append( ": " );
710 property.mSelector = kAudioObjectPropertyName;
711 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
712 if ( result != noErr ) {
713 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
714 errorText_ = errorStream_.str();
715 error( RtAudioError::WARNING );
719 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
720 length = CFStringGetLength(cfname);
721 char *name = (char *)malloc(length * 3 + 1);
722 #if defined( UNICODE ) || defined( _UNICODE )
723 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
725 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
727 info.name.append( (const char *)name, strlen(name) );
731 // Get the output stream "configuration".
732 AudioBufferList *bufferList = nil;
733 property.mSelector = kAudioDevicePropertyStreamConfiguration;
734 property.mScope = kAudioDevicePropertyScopeOutput;
735 // property.mElement = kAudioObjectPropertyElementWildcard;
737 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
738 if ( result != noErr || dataSize == 0 ) {
739 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
740 errorText_ = errorStream_.str();
741 error( RtAudioError::WARNING );
745 // Allocate the AudioBufferList.
746 bufferList = (AudioBufferList *) malloc( dataSize );
747 if ( bufferList == NULL ) {
748 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
749 error( RtAudioError::WARNING );
753 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
754 if ( result != noErr || dataSize == 0 ) {
756 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
757 errorText_ = errorStream_.str();
758 error( RtAudioError::WARNING );
762 // Get output channel information.
763 unsigned int i, nStreams = bufferList->mNumberBuffers;
764 for ( i=0; i<nStreams; i++ )
765 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
768 // Get the input stream "configuration".
769 property.mScope = kAudioDevicePropertyScopeInput;
770 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
771 if ( result != noErr || dataSize == 0 ) {
772 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
773 errorText_ = errorStream_.str();
774 error( RtAudioError::WARNING );
778 // Allocate the AudioBufferList.
779 bufferList = (AudioBufferList *) malloc( dataSize );
780 if ( bufferList == NULL ) {
781 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
782 error( RtAudioError::WARNING );
786 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
787 if (result != noErr || dataSize == 0) {
789 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
790 errorText_ = errorStream_.str();
791 error( RtAudioError::WARNING );
795 // Get input channel information.
796 nStreams = bufferList->mNumberBuffers;
797 for ( i=0; i<nStreams; i++ )
798 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
801 // If device opens for both playback and capture, we determine the channels.
802 if ( info.outputChannels > 0 && info.inputChannels > 0 )
803 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
805 // Probe the device sample rates.
806 bool isInput = false;
807 if ( info.outputChannels == 0 ) isInput = true;
809 // Determine the supported sample rates.
810 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
811 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
812 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
813 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
814 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
815 errorText_ = errorStream_.str();
816 error( RtAudioError::WARNING );
820 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
821 AudioValueRange rangeList[ nRanges ];
822 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
823 if ( result != kAudioHardwareNoError ) {
824 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
825 errorText_ = errorStream_.str();
826 error( RtAudioError::WARNING );
830 // The sample rate reporting mechanism is a bit of a mystery. It
831 // seems that it can either return individual rates or a range of
832 // rates. I assume that if the min / max range values are the same,
833 // then that represents a single supported rate and if the min / max
834 // range values are different, the device supports an arbitrary
835 // range of values (though there might be multiple ranges, so we'll
836 // use the most conservative range).
837 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
838 bool haveValueRange = false;
839 info.sampleRates.clear();
840 for ( UInt32 i=0; i<nRanges; i++ ) {
841 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
842 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
843 info.sampleRates.push_back( tmpSr );
845 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
846 info.preferredSampleRate = tmpSr;
849 haveValueRange = true;
850 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
851 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
855 if ( haveValueRange ) {
856 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
857 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
858 info.sampleRates.push_back( SAMPLE_RATES[k] );
860 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
861 info.preferredSampleRate = SAMPLE_RATES[k];
866 // Sort and remove any redundant values
867 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
868 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
870 if ( info.sampleRates.size() == 0 ) {
871 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
872 errorText_ = errorStream_.str();
873 error( RtAudioError::WARNING );
877 // CoreAudio always uses 32-bit floating point data for PCM streams.
878 // Thus, any other "physical" formats supported by the device are of
879 // no interest to the client.
880 info.nativeFormats = RTAUDIO_FLOAT32;
882 if ( info.outputChannels > 0 )
883 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
884 if ( info.inputChannels > 0 )
885 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
891 static OSStatus callbackHandler( AudioDeviceID inDevice,
892 const AudioTimeStamp* /*inNow*/,
893 const AudioBufferList* inInputData,
894 const AudioTimeStamp* /*inInputTime*/,
895 AudioBufferList* outOutputData,
896 const AudioTimeStamp* /*inOutputTime*/,
899 CallbackInfo *info = (CallbackInfo *) infoPointer;
901 RtApiCore *object = (RtApiCore *) info->object;
902 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
903 return kAudioHardwareUnspecifiedError;
905 return kAudioHardwareNoError;
908 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
910 const AudioObjectPropertyAddress properties[],
911 void* handlePointer )
913 CoreHandle *handle = (CoreHandle *) handlePointer;
914 for ( UInt32 i=0; i<nAddresses; i++ ) {
915 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
916 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
917 handle->xrun[1] = true;
919 handle->xrun[0] = true;
923 return kAudioHardwareNoError;
926 static OSStatus rateListener( AudioObjectID inDevice,
927 UInt32 /*nAddresses*/,
928 const AudioObjectPropertyAddress /*properties*/[],
931 Float64 *rate = (Float64 *) ratePointer;
932 UInt32 dataSize = sizeof( Float64 );
933 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
934 kAudioObjectPropertyScopeGlobal,
935 kAudioObjectPropertyElementMaster };
936 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
937 return kAudioHardwareNoError;
// Probe and configure a CoreAudio device for the requested stream direction.
// Sets up channel/stream mapping, buffer size, sample rate, virtual and
// physical stream formats, internal buffers, and the device IOProc.
// Returns SUCCESS/FAILURE (error paths set errorText_; the listing elides
// the actual `return FAILURE;` lines, which sit between the visible ones).
940 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
941 unsigned int firstChannel, unsigned int sampleRate,
942 RtAudioFormat format, unsigned int *bufferSize,
943 RtAudio::StreamOptions *options )
// Re-validate the device index even though callers are expected to have
// checked it already (defensive: the device list can change between calls).
946 unsigned int nDevices = getDeviceCount();
947 if ( nDevices == 0 ) {
948 // This should not happen because a check is made before this function is called.
949 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
953 if ( device >= nDevices ) {
954 // This should not happen because a check is made before this function is called.
955 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Fetch the system device list and translate our index into an AudioDeviceID.
// NOTE(review): `AudioDeviceID deviceList[ nDevices ]` is a variable-length
// array — a GCC/Clang extension, not standard C++.
959 AudioDeviceID deviceList[ nDevices ];
960 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
961 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
962 kAudioObjectPropertyScopeGlobal,
963 kAudioObjectPropertyElementMaster };
964 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
965 0, NULL, &dataSize, (void *) &deviceList );
966 if ( result != noErr ) {
967 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
971 AudioDeviceID id = deviceList[ device ];
973 // Setup for stream mode.
974 bool isInput = false;
975 if ( mode == INPUT ) {
977 property.mScope = kAudioDevicePropertyScopeInput;
980 property.mScope = kAudioDevicePropertyScopeOutput;
982 // Get the stream "configuration".
983 AudioBufferList *bufferList = nil;
985 property.mSelector = kAudioDevicePropertyStreamConfiguration;
986 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
987 if ( result != noErr || dataSize == 0 ) {
988 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
989 errorText_ = errorStream_.str();
993 // Allocate the AudioBufferList.
// Raw malloc because dataSize is a variable-length CoreAudio structure;
// ownership stays local (freed later in code elided from this listing —
// verify no leak on the intervening error paths).
994 bufferList = (AudioBufferList *) malloc( dataSize );
995 if ( bufferList == NULL ) {
996 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1000 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1001 if (result != noErr || dataSize == 0) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Search for one or more streams that contain the desired number of
1009 // channels. CoreAudio devices can have an arbitrary number of
1010 // streams and each stream can have an arbitrary number of channels.
1011 // For each stream, a single buffer of interleaved samples is
1012 // provided. RtAudio prefers the use of one stream of interleaved
1013 // data or multiple consecutive single-channel streams. However, we
1014 // now support multiple consecutive multi-channel streams of
1015 // interleaved data as well.
1016 UInt32 iStream, offsetCounter = firstChannel;
1017 UInt32 nStreams = bufferList->mNumberBuffers;
1018 bool monoMode = false;
1019 bool foundStream = false;
1021 // First check that the device supports the requested number of
// channels (total across all of its streams).
1023 UInt32 deviceChannels = 0;
1024 for ( iStream=0; iStream<nStreams; iStream++ )
1025 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1027 if ( deviceChannels < ( channels + firstChannel ) ) {
1029 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1030 errorText_ = errorStream_.str();
1034 // Look for a single stream meeting our needs.
1035 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1036 for ( iStream=0; iStream<nStreams; iStream++ ) {
1037 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1038 if ( streamChannels >= channels + offsetCounter ) {
1039 firstStream = iStream;
1040 channelOffset = offsetCounter;
// (the line setting foundStream = true is elided from this listing)
1044 if ( streamChannels > offsetCounter ) break;
1045 offsetCounter -= streamChannels;
1048 // If we didn't find a single stream above, then we should be able
1049 // to meet the channel specification with multiple streams.
1050 if ( foundStream == false ) {
// Re-walk the streams to locate the one containing firstChannel, then
// count forward until the requested channels are covered.  monoMode is
// only left true when every contributing stream is single-channel.
1052 offsetCounter = firstChannel;
1053 for ( iStream=0; iStream<nStreams; iStream++ ) {
1054 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1055 if ( streamChannels > offsetCounter ) break;
1056 offsetCounter -= streamChannels;
1059 firstStream = iStream;
1060 channelOffset = offsetCounter;
1061 Int32 channelCounter = channels + offsetCounter - streamChannels;
1063 if ( streamChannels > 1 ) monoMode = false;
1064 while ( channelCounter > 0 ) {
1065 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1066 if ( streamChannels > 1 ) monoMode = false;
1067 channelCounter -= streamChannels;
// (streamCount update for the multi-stream case is elided from this listing)
1074 // Determine the buffer size.
1075 AudioValueRange bufferRange;
1076 dataSize = sizeof( AudioValueRange );
1077 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1078 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1080 if ( result != noErr ) {
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1082 errorText_ = errorStream_.str();
// Clamp the caller's requested *bufferSize into the device-supported range;
// RTAUDIO_MINIMIZE_LATENCY forces the device minimum regardless.
1086 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1087 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1088 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1090 // Set the buffer size. For multiple streams, I'm assuming we only
1091 // need to make this setting for the master channel.
1092 UInt32 theSize = (UInt32) *bufferSize;
1093 dataSize = sizeof( UInt32 );
1094 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1095 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1097 if ( result != noErr ) {
1098 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1099 errorText_ = errorStream_.str();
1103 // If attempting to setup a duplex stream, the bufferSize parameter
1104 // MUST be the same in both directions!
1105 *bufferSize = theSize;
1106 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1107 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1108 errorText_ = errorStream_.str();
1112 stream_.bufferSize = *bufferSize;
1113 stream_.nBuffers = 1;
1115 // Try to set "hog" mode ... it's not clear to me this is working.
1116 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
// (declaration of hog_pid is elided from this listing; presumably pid_t)
1118 dataSize = sizeof( hog_pid );
1119 property.mSelector = kAudioDevicePropertyHogMode;
1120 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1121 if ( result != noErr ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1123 errorText_ = errorStream_.str();
// Only claim the device if another process (or none) currently hogs it.
1127 if ( hog_pid != getpid() ) {
// (assignment of hog_pid = getpid() is elided from this listing)
1129 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1130 if ( result != noErr ) {
1131 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1132 errorText_ = errorStream_.str();
1138 // Check and if necessary, change the sample rate for the device.
1139 Float64 nominalRate;
1140 dataSize = sizeof( Float64 );
1141 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1142 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1145 errorText_ = errorStream_.str();
1149 // Only change the sample rate if off by more than 1 Hz.
1150 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1152 // Set a property listener for the sample rate change
// The listener (rateListener, defined earlier in this file) writes the
// device's reported rate into reportedRate so we can poll for completion.
1153 Float64 reportedRate = 0.0;
1154 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1155 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1158 errorText_ = errorStream_.str();
1162 nominalRate = (Float64) sampleRate;
1163 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1164 if ( result != noErr ) {
1165 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1167 errorText_ = errorStream_.str();
1171 // Now wait until the reported nominal rate is what we just set.
// Poll up to ~5 seconds in 5 ms steps (the usleep( 5000 ) call in the loop
// body is elided from this listing — TODO confirm against full source).
1172 UInt32 microCounter = 0;
1173 while ( reportedRate != nominalRate ) {
1174 microCounter += 5000;
1175 if ( microCounter > 5000000 ) break;
1179 // Remove the property listener.
1180 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1182 if ( microCounter > 5000000 ) {
1183 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1184 errorText_ = errorStream_.str();
1189 // Now set the stream format for all streams. Also, check the
1190 // physical format of the device and change that if necessary.
1191 AudioStreamBasicDescription description;
1192 dataSize = sizeof( AudioStreamBasicDescription );
1193 property.mSelector = kAudioStreamPropertyVirtualFormat;
1194 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1195 if ( result != noErr ) {
1196 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1197 errorText_ = errorStream_.str();
1201 // Set the sample rate and data format id. However, only make the
1202 // change if the sample rate is not within 1.0 of the desired
1203 // rate and the format is not linear pcm.
1204 bool updateFormat = false;
1205 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1206 description.mSampleRate = (Float64) sampleRate;
1207 updateFormat = true;
1210 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1211 description.mFormatID = kAudioFormatLinearPCM;
1212 updateFormat = true;
1215 if ( updateFormat ) {
1216 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1217 if ( result != noErr ) {
1218 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1219 errorText_ = errorStream_.str();
1224 // Now check the physical format.
1225 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1226 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1227 if ( result != noErr ) {
1228 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1229 errorText_ = errorStream_.str();
1233 //std::cout << "Current physical stream format:" << std::endl;
1234 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1235 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1236 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1237 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
// If the physical format is not linear PCM at >= 16 bits, probe a list of
// candidate physical formats from best (32-bit float) downward.
1239 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1240 description.mFormatID = kAudioFormatLinearPCM;
1241 //description.mSampleRate = (Float64) sampleRate;
1242 AudioStreamBasicDescription testDescription = description;
1245 // We'll try higher bit rates first and then work our way down.
// The fractional keys 24.2 / 24.4 distinguish the three 24-bit variants
// (packed, 4-byte aligned-low, 4-byte aligned-high).
// NOTE(review): the vector is declared pair<UInt32, UInt32> while the
// push_back calls construct pair<Float32, UInt32> — the fractional keys
// would be truncated to 24, collapsing the three variants.  Upstream
// RtAudio declares this as pair<Float32, UInt32>; confirm against the
// canonical source.
1246 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1247 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1248 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1249 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1252 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1254 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1255 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1256 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1260 bool setPhysicalFormat = false;
1261 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1262 testDescription = description;
1263 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1264 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): `~( x & kAudioFormatFlagIsPacked )` is a bitwise NOT of a
// mask value and is non-zero (truthy) for every input, so this condition
// reduces to `bits == 24`.  Logical `!` looks intended (i.e. "24-bit and
// NOT packed" => 4 bytes/frame); confirm against the canonical source.
1265 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1266 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
// (the `else` introducing the next line is elided from this listing)
1268 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1270 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1271 if ( result == noErr ) {
1272 setPhysicalFormat = true;
1273 //std::cout << "Updated physical stream format:" << std::endl;
1274 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1275 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1276 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1277 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1282 if ( !setPhysicalFormat ) {
1283 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1284 errorText_ = errorStream_.str();
1287 } // done setting virtual/physical formats.
1289 // Get the stream / device latency.
// (declaration of `latency` is elided from this listing; presumably UInt32)
1291 dataSize = sizeof( UInt32 );
1292 property.mSelector = kAudioDevicePropertyLatency;
1293 if ( AudioObjectHasProperty( id, &property ) == true ) {
1294 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1295 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
// Latency lookup failure is non-fatal: warn and continue.
1297 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1298 errorText_ = errorStream_.str();
1299 error( RtAudioError::WARNING );
1303 // Byte-swapping: According to AudioHardware.h, the stream data will
1304 // always be presented in native-endian format, so we should never
1305 // need to byte swap.
1306 stream_.doByteSwap[mode] = false;
1308 // From the CoreAudio documentation, PCM data must be supplied as
// 32-bit floats at the virtual (stream) level, whatever the physical format.
1310 stream_.userFormat = format;
1311 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1313 if ( streamCount == 1 )
1314 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1315 else // multiple streams
1316 stream_.nDeviceChannels[mode] = channels;
1317 stream_.nUserChannels[mode] = channels;
1318 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1319 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1320 else stream_.userInterleaved = true;
1321 stream_.deviceInterleaved[mode] = true;
1322 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1324 // Set flags for buffer conversion.
1325 stream_.doConvertBuffer[mode] = false;
1326 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1327 stream_.doConvertBuffer[mode] = true;
1328 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1330 if ( streamCount == 1 ) {
1331 if ( stream_.nUserChannels[mode] > 1 &&
1332 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1333 stream_.doConvertBuffer[mode] = true;
1335 else if ( monoMode && stream_.userInterleaved )
1336 stream_.doConvertBuffer[mode] = true;
1338 // Allocate our CoreHandle structure for the stream.
1339 CoreHandle *handle = 0;
1340 if ( stream_.apiHandle == 0 ) {
// (the `try {` opening this allocation is elided from this listing)
1342 handle = new CoreHandle;
1344 catch ( std::bad_alloc& ) {
1345 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1349 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1350 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1353 stream_.apiHandle = (void *) handle;
// (the `else` for the pre-existing-handle case is elided from this listing)
1356 handle = (CoreHandle *) stream_.apiHandle;
1357 handle->iStream[mode] = firstStream;
1358 handle->nStreams[mode] = streamCount;
1359 handle->id[mode] = id;
1361 // Allocate necessary internal buffers.
1362 unsigned long bufferBytes;
1363 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1364 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): the memset executes before the NULL check below — on a
// failed malloc this dereferences NULL.  The check should precede the
// memset (or calloc, per the commented-out line above, should be used).
1365 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1366 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1367 if ( stream_.userBuffer[mode] == NULL ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1372 // If possible, we will make use of the CoreAudio stream buffers as
1373 // "device buffers". However, we can't do this if using multiple
// streams — then a separate interleaved staging buffer is required.
1375 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1377 bool makeBuffer = true;
1378 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1379 if ( mode == INPUT ) {
// Reuse an existing (output-side) device buffer when it is large enough.
1380 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1381 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1382 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1387 bufferBytes *= *bufferSize;
1388 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1389 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1390 if ( stream_.deviceBuffer == NULL ) {
1391 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1397 stream_.sampleRate = sampleRate;
1398 stream_.device[mode] = device;
1399 stream_.state = STREAM_STOPPED;
1400 stream_.callbackInfo.object = (void *) this;
1402 // Setup the buffer conversion information structure.
1403 if ( stream_.doConvertBuffer[mode] ) {
1404 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1405 else setConvertInfo( mode, channelOffset );
1408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1409 // Only one callback procedure per device.
1410 stream_.mode = DUPLEX;
// Register the IOProc.  On 10.5+ use the per-proc ID API so multiple procs
// per device are possible; the older AudioDeviceAddIOProc is deprecated.
1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1413 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 // deprecated in favor of AudioDeviceCreateIOProcID()
1416 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 if ( result != noErr ) {
1419 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1420 errorText_ = errorStream_.str();
1423 if ( stream_.mode == OUTPUT && mode == INPUT )
1424 stream_.mode = DUPLEX;
// (the `else` assigning the single-direction mode is elided from this listing)
1426 stream_.mode = mode;
1429 // Setup the device property listener for over/underload.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error-unwind path: tear down whatever was set up before failing.
1438 pthread_cond_destroy( &handle->condition );
// (delete of the handle is elided from this listing)
1440 stream_.apiHandle = 0;
1443 for ( int i=0; i<2; i++ ) {
1444 if ( stream_.userBuffer[i] ) {
1445 free( stream_.userBuffer[i] );
1446 stream_.userBuffer[i] = 0;
1450 if ( stream_.deviceBuffer ) {
1451 free( stream_.deviceBuffer );
1452 stream_.deviceBuffer = 0;
1455 stream_.state = STREAM_CLOSED;
// Close an open stream: remove the xrun property listeners, stop the
// device(s) if running, destroy the IOProc(s), free all internal buffers,
// and reset the stream state.  Warns (does not throw) if no stream is open.
1459 void RtApiCore :: closeStream( void )
1461 if ( stream_.state == STREAM_CLOSED ) {
1462 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1463 error( RtAudioError::WARNING );
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side (index 0): listener removal, stop, and IOProc teardown.
1468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1471 kAudioObjectPropertyScopeGlobal,
1472 kAudioObjectPropertyElementMaster };
1474 property.mSelector = kAudioDeviceProcessorOverload;
1475 property.mScope = kAudioObjectPropertyScopeGlobal;
1476 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1477 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1478 error( RtAudioError::WARNING );
1481 if ( stream_.state == STREAM_RUNNING )
1482 AudioDeviceStop( handle->id[0], callbackHandler );
1483 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1484 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 // deprecated in favor of AudioDeviceDestroyIOProcID()
1487 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side (index 1): same teardown, but only when the input device is
// distinct (a duplex stream on one device shares a single IOProc).
1491 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1493 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1494 kAudioObjectPropertyScopeGlobal,
1495 kAudioObjectPropertyElementMaster };
1497 property.mSelector = kAudioDeviceProcessorOverload;
1498 property.mScope = kAudioObjectPropertyScopeGlobal;
1499 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1500 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1501 error( RtAudioError::WARNING );
1504 if ( stream_.state == STREAM_RUNNING )
1505 AudioDeviceStop( handle->id[1], callbackHandler );
1506 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1507 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1509 // deprecated in favor of AudioDeviceDestroyIOProcID()
1510 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release user and device buffers (both directions).
1514 for ( int i=0; i<2; i++ ) {
1515 if ( stream_.userBuffer[i] ) {
1516 free( stream_.userBuffer[i] );
1517 stream_.userBuffer[i] = 0;
1521 if ( stream_.deviceBuffer ) {
1522 free( stream_.deviceBuffer );
1523 stream_.deviceBuffer = 0;
1526 // Destroy pthread condition variable.
1527 pthread_cond_destroy( &handle->condition );
// (delete of the handle is elided from this listing)
1529 stream_.apiHandle = 0;
1531 stream_.mode = UNINITIALIZED;
1532 stream_.state = STREAM_CLOSED;
// Start the stream: kick off the CoreAudio IOProc on the output and/or
// input device(s) and reset the drain bookkeeping.  Warns if already
// running; raises SYSTEM_ERROR if any AudioDeviceStart call fails.
1535 void RtApiCore :: startStream( void )
1538 if ( stream_.state == STREAM_RUNNING ) {
1539 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1540 error( RtAudioError::WARNING );
// Timestamp the start for stream-time bookkeeping (when gettimeofday exists).
1544 #if defined( HAVE_GETTIMEOFDAY )
1545 gettimeofday( &stream_.lastTickTimestamp, NULL );
1548 OSStatus result = noErr;
1549 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1550 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1552 result = AudioDeviceStart( handle->id[0], callbackHandler );
1553 if ( result != noErr ) {
1554 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1555 errorText_ = errorStream_.str();
// Start the input device separately only when it differs from the output
// device (same-device duplex shares one IOProc).
1560 if ( stream_.mode == INPUT ||
1561 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1563 result = AudioDeviceStart( handle->id[1], callbackHandler );
1564 if ( result != noErr ) {
1565 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1566 errorText_ = errorStream_.str();
1571 handle->drainCounter = 0;
1572 handle->internalDrain = false;
1573 stream_.state = STREAM_RUNNING;
// Fall through to error() only if some start call failed above.
1576 if ( result == noErr ) return;
1577 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, draining pending output first: signal the callback to
// start draining (drainCounter = 2) and block on the condition variable
// until callbackEvent() signals completion, then stop the device(s).
1580 void RtApiCore :: stopStream( void )
1583 if ( stream_.state == STREAM_STOPPED ) {
1584 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1585 error( RtAudioError::WARNING );
1589 OSStatus result = noErr;
1590 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1591 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is in progress yet: request one and
// wait for the audio thread to signal it has written out the last buffer.
1593 if ( handle->drainCounter == 0 ) {
1594 handle->drainCounter = 2;
1595 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1598 result = AudioDeviceStop( handle->id[0], callbackHandler );
1599 if ( result != noErr ) {
1600 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1601 errorText_ = errorStream_.str();
// Input device is stopped separately only when distinct from the output.
1606 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1608 result = AudioDeviceStop( handle->id[1], callbackHandler );
1609 if ( result != noErr ) {
1610 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1611 errorText_ = errorStream_.str();
1616 stream_.state = STREAM_STOPPED;
1619 if ( result == noErr ) return;
1620 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream: like stopStream() but without draining — setting
// drainCounter = 2 up front makes the callback write zeros instead of
// waiting for user data.  (The subsequent stopStream() call sits past the
// visible lines of this listing.)
1623 void RtApiCore :: abortStream( void )
1626 if ( stream_.state == STREAM_STOPPED ) {
1627 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1628 error( RtAudioError::WARNING );
1632 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1633 handle->drainCounter = 2;
1638 // This function will be called by a spawned thread when the user
1639 // callback function signals that the stream should be stopped or
1640 // aborted. It is better to handle it this way because the
1641 // callbackEvent() function probably should return before the AudioDeviceStop()
1642 // function is called.
// Thread entry point: ptr is the stream's CallbackInfo, whose `object`
// field holds the owning RtApiCore instance.
1643 static void *coreStopStream( void *ptr )
1645 CallbackInfo *info = (CallbackInfo *) ptr;
1646 RtApiCore *object = (RtApiCore *) info->object;
1648 object->stopStream();
1649 pthread_exit( NULL );
// Per-buffer IOProc workhorse, invoked (via callbackHandler) on the
// CoreAudio realtime thread for each device.  Runs the user callback to
// produce output data, de-/interleaves between user buffers and the
// device's one-or-more CoreAudio stream buffers, and manages the drain
// protocol used by stopStream()/abortStream().  Returns SUCCESS/FAILURE.
1652 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1653 const AudioBufferList *inBufferList,
1654 const AudioBufferList *outBufferList )
1656 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1657 if ( stream_.state == STREAM_CLOSED ) {
1658 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1659 error( RtAudioError::WARNING );
1663 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1664 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1666 // Check if we were draining the stream and signal is finished.
// drainCounter > 3 means the zero-filled flush buffers have been emitted;
// either spawn a thread to call stopStream() (internal drain, triggered by
// the user callback's return value) or wake the blocked stopStream() call.
1667 if ( handle->drainCounter > 3 ) {
1668 ThreadHandle threadId;
1670 stream_.state = STREAM_STOPPING;
1671 if ( handle->internalDrain == true )
1672 pthread_create( &threadId, NULL, coreStopStream, info );
1673 else // external call to stopStream()
1674 pthread_cond_signal( &handle->condition );
// (a return SUCCESS for this early-out path is elided from this listing)
1678 AudioDeviceID outputDevice = handle->id[0];
1680 // Invoke user callback to get fresh output data UNLESS we are
1681 // draining stream or duplex mode AND the input/output devices are
1682 // different AND this function is called for the input device.
1683 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1684 RtAudioCallback callback = (RtAudioCallback) info->callback;
1685 double streamTime = getStreamTime();
1686 RtAudioStreamStatus status = 0;
// Report (and clear) any xrun flags recorded by the xrunListener.
1687 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1688 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1689 handle->xrun[0] = false;
1691 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1692 status |= RTAUDIO_INPUT_OVERFLOW;
1693 handle->xrun[1] = false;
// Callback return protocol: 2 = abort immediately, 1 = drain then stop.
1696 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1697 stream_.bufferSize, streamTime, status, info->userData );
1698 if ( cbReturnValue == 2 ) {
1699 stream_.state = STREAM_STOPPING;
1700 handle->drainCounter = 2;
// (abort-path statements, e.g. spawning coreStopStream and returning, are
// elided from this listing)
1704 else if ( cbReturnValue == 1 ) {
1705 handle->drainCounter = 1;
1706 handle->internalDrain = true;
// ---- Output side: move user data into the CoreAudio output buffers. ----
1710 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1712 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1714 if ( handle->nStreams[0] == 1 ) {
1715 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
// (the zero-fill value argument line is elided from this listing)
1717 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1719 else { // fill multiple streams with zeros
1720 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1721 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1723 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1727 else if ( handle->nStreams[0] == 1 ) {
1728 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1729 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1730 stream_.userBuffer[0], stream_.convertInfo[0] );
1732 else { // copy from user buffer
1733 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1734 stream_.userBuffer[0],
1735 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1738 else { // fill multiple streams
1739 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1740 if ( stream_.doConvertBuffer[0] ) {
1741 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1742 inBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: each user channel maps to its own single-channel stream.
1745 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1746 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1747 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1748 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1749 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1752 else { // fill multiple multi-channel streams with interleaved data
1753 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
// (declarations of the Float32 *in/*out walking pointers are elided here)
1756 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1757 UInt32 inChannels = stream_.nUserChannels[0];
1758 if ( stream_.doConvertBuffer[0] ) {
1759 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1760 inChannels = stream_.nDeviceChannels[0];
// inOffset is the stride between successive channels of one frame in the
// source buffer: 1 if interleaved, bufferSize if planar.
1763 if ( inInterleaved ) inOffset = 1;
1764 else inOffset = stream_.bufferSize;
1766 channelsLeft = inChannels;
1767 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1769 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1770 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1773 // Account for possible channel offset in first stream
1774 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1775 streamChannels -= stream_.channelOffset[0];
1776 outJump = stream_.channelOffset[0];
// (the out-pointer advance by outJump is elided from this listing)
1780 // Account for possible unfilled channels at end of the last stream
1781 if ( streamChannels > channelsLeft ) {
1782 outJump = streamChannels - channelsLeft;
1783 streamChannels = channelsLeft;
1786 // Determine input buffer offsets and skips
1787 if ( inInterleaved ) {
1788 inJump = inChannels;
1789 in += inChannels - channelsLeft;
// (the planar-input `else` branch header is elided from this listing)
1793 in += (inChannels - channelsLeft) * inOffset;
// Copy one frame's worth of channels at a time into this stream's buffer.
1796 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1797 for ( unsigned int j=0; j<streamChannels; j++ ) {
1798 *out++ = in[j*inOffset];
// (per-frame pointer advances by inJump/outJump are elided from this listing)
1803 channelsLeft -= streamChannels;
// Advance drainCounter once per callback while draining; input is skipped.
1809 // Don't bother draining input
1810 if ( handle->drainCounter ) {
1811 handle->drainCounter++;
// (the goto/return that skips input processing is elided from this listing)
// ---- Input side: move CoreAudio input buffers into the user buffer. ----
1815 AudioDeviceID inputDevice;
1816 inputDevice = handle->id[1];
1817 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1819 if ( handle->nStreams[1] == 1 ) {
1820 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1821 convertBuffer( stream_.userBuffer[1],
1822 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1823 stream_.convertInfo[1] );
1825 else { // copy to user buffer
1826 memcpy( stream_.userBuffer[1],
1827 inBufferList->mBuffers[handle->iStream[1]].mData,
1828 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1831 else { // read from multiple streams
1832 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1833 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1835 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1836 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1837 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1838 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1839 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1842 else { // read from multiple multi-channel streams
// Mirror image of the output interleaving loop above, device -> user.
1843 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1846 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1847 UInt32 outChannels = stream_.nUserChannels[1];
1848 if ( stream_.doConvertBuffer[1] ) {
1849 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1850 outChannels = stream_.nDeviceChannels[1];
1853 if ( outInterleaved ) outOffset = 1;
1854 else outOffset = stream_.bufferSize;
1856 channelsLeft = outChannels;
1857 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1859 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1860 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1863 // Account for possible channel offset in first stream
1864 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1865 streamChannels -= stream_.channelOffset[1];
1866 inJump = stream_.channelOffset[1];
1870 // Account for possible unread channels at end of the last stream
1871 if ( streamChannels > channelsLeft ) {
1872 inJump = streamChannels - channelsLeft;
1873 streamChannels = channelsLeft;
1876 // Determine output buffer offsets and skips
1877 if ( outInterleaved ) {
1878 outJump = outChannels;
1879 out += outChannels - channelsLeft;
1883 out += (outChannels - channelsLeft) * outOffset;
1886 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1887 for ( unsigned int j=0; j<streamChannels; j++ ) {
1888 out[j*outOffset] = *in++;
1893 channelsLeft -= streamChannels;
// If a staging device buffer was used, convert it to the user format now.
1897 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1898 convertBuffer( stream_.userBuffer[1],
1899 stream_.deviceBuffer,
1900 stream_.convertInfo[1] );
1906 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time by one buffer before returning.
1908 RtApi::tickStreamTime();
// Translate a CoreAudio OSStatus error code into the name of the
// corresponding kAudioHardware*/kAudioDevice* constant, for use in
// RtAudio error messages. Unrecognized codes yield a generic string.
const char* RtApiCore :: getErrorCode( OSStatus code )
  case kAudioHardwareNotRunningError:
    return "kAudioHardwareNotRunningError";
  case kAudioHardwareUnspecifiedError:
    return "kAudioHardwareUnspecifiedError";
  case kAudioHardwareUnknownPropertyError:
    return "kAudioHardwareUnknownPropertyError";
  case kAudioHardwareBadPropertySizeError:
    return "kAudioHardwareBadPropertySizeError";
  case kAudioHardwareIllegalOperationError:
    return "kAudioHardwareIllegalOperationError";
  case kAudioHardwareBadObjectError:
    return "kAudioHardwareBadObjectError";
  case kAudioHardwareBadDeviceError:
    return "kAudioHardwareBadDeviceError";
  case kAudioHardwareBadStreamError:
    return "kAudioHardwareBadStreamError";
  case kAudioHardwareUnsupportedOperationError:
    return "kAudioHardwareUnsupportedOperationError";
  case kAudioDeviceUnsupportedFormatError:
    return "kAudioDeviceUnsupportedFormatError";
  case kAudioDevicePermissionsError:
    return "kAudioDevicePermissionsError";
  // Fallback for any code not handled above.
  return "CoreAudio unknown error";
1954 //******************** End of __MACOSX_CORE__ *********************//
1957 #if defined(__UNIX_JACK__)
1959 // JACK is a low-latency audio server, originally written for the
1960 // GNU/Linux operating system and now also ported to OS-X. It can
1961 // connect a number of different applications to an audio device, as
1962 // well as allowing them to share audio between themselves.
1964 // When using JACK with RtAudio, "devices" refer to JACK clients that
1965 // have ports connected to the server. The JACK server is typically
1966 // started in a terminal as follows:
//    jackd -d alsa -d hw:0
1970 // or through an interface program such as qjackctl. Many of the
1971 // parameters normally set for a stream are fixed by the JACK server
1972 // and can be specified when the JACK server is started. In
//    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1977 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1978 // frames, and number of buffers = 4. Once the server is running, it
1979 // is not possible to override these values. If the values are not
1980 // specified in the command-line, the JACK server uses default values.
1982 // The JACK server does not have to be running when an instance of
1983 // RtApiJack is created, though the function getDeviceCount() will
1984 // report 0 devices found until JACK has been started. When no
1985 // devices are available (i.e., the JACK server is not running), a
1986 // stream cannot be opened.
1988 #include <jack/jack.h>
// A structure to hold various information related to the Jack API
// implementation. Index convention throughout this backend:
// [0] = playback/output side, [1] = capture/input side (see the port
// registration code in probeDeviceOpen).
  jack_client_t *client;          // connection to the JACK server
  jack_port_t **ports[2];         // per-channel registered JACK ports
  std::string deviceName[2];      // client-name prefix of the connected device
  pthread_cond_t condition;       // signaled by the callback when draining completes
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.
  // Default-construct with no client, no ports, and xrun flags cleared.
  :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2007 #if !defined(__RTAUDIO_DEBUG__)
// No-op sink for JACK's internal error messages; installed via
// jack_set_error_function() when RtAudio is built without debug output.
static void jackSilentError( const char * ) {}
// Constructor: port auto-connection defaults to on; it is disabled
// per-stream by the RTAUDIO_JACK_DONT_CONNECT option flag (see
// probeDeviceOpen).
RtApiJack :: RtApiJack()
  :shouldAutoconnect_(true) {
  // Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
  // Turn off Jack's internal error reporting.
  jack_set_error_function( &jackSilentError );
2020 RtApiJack :: ~RtApiJack()
2022 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by opening a throw-away client and scanning all
// registered port names: each distinct name prefix before the first
// colon (the owning client's name) counts as one device. Returns 0 if
// no JACK server is reachable.
unsigned int RtApiJack :: getDeviceCount( void )
  // See if we can become a jack client.
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
  if ( client == 0 ) return 0;
  std::string port, previousPort;
  unsigned int nChannels = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nChannels ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    // Keep the colon so identical prefixes compare equal.
    port = port.substr( 0, iColon + 1 );
    // A new prefix means a new client, i.e. a new device.
    if ( port != previousPort ) {
      previousPort = port;
  } while ( ports[++nChannels] );
  // Release the temporary probing client.
  jack_client_close( client );
// Probe one JACK "device" (a client-name prefix, see getDeviceCount).
// Fills in name, the server's single sample rate, channel counts
// derived from the client's ports, and default-device flags. Returns a
// DeviceInfo with probed == false on any failure.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;
  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );
  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      // The device'th distinct client prefix is the one we report.
      if ( nDevices == device ) info.name = port;
      previousPort = port;
  } while ( ports[++nPorts] );
  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
  // Get the current jack server sample rate.
  info.sampleRates.clear();
  // JACK runs at a single, server-wide rate.
  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );
  // Count the available ports containing the client name as device
  // channels. Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
  while ( ports[ nChannels ] ) nChannels++;
  info.outputChannels = nChannels;
  // Jack "output ports" equal RtAudio input channels.
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
  while ( ports[ nChannels ] ) nChannels++;
  info.inputChannels = nChannels;
  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );
  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;
  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;
  jack_client_close(client);
2151 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2153 CallbackInfo *info = (CallbackInfo *) infoPointer;
2155 RtApiJack *object = (RtApiJack *) info->object;
2156 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2161 // This function will be called by a spawned thread when the Jack
2162 // server signals that it is shutting down. It is necessary to handle
2163 // it this way because the jackShutdown() function must return before
2164 // the jack_deactivate() function (in closeStream()) will return.
2165 static void *jackCloseStream( void *ptr )
2167 CallbackInfo *info = (CallbackInfo *) ptr;
2168 RtApiJack *object = (RtApiJack *) info->object;
2170 object->closeStream();
2172 pthread_exit( NULL );
2174 static void jackShutdown( void *infoPointer )
2176 CallbackInfo *info = (CallbackInfo *) infoPointer;
2177 RtApiJack *object = (RtApiJack *) info->object;
2179 // Check current stream state. If stopped, then we'll assume this
2180 // was called as a result of a call to RtApiJack::stopStream (the
2181 // deactivation of a client handle causes this function to be called).
2182 // If not, we'll assume the Jack server is shutting down or some
2183 // other problem occurred and we should close the stream.
2184 if ( object->isStreamRunning() == false ) return;
2186 ThreadHandle threadId;
2187 pthread_create( &threadId, NULL, jackCloseStream, info );
2188 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2191 static int jackXrun( void *infoPointer )
2193 JackHandle *handle = *((JackHandle **) infoPointer);
2195 if ( handle->ports[0] ) handle->xrun[0] = true;
2196 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a JACK stream: become a JACK
// client (first pass only), resolve the device's client-name prefix,
// validate channel counts and the server sample rate, allocate the
// JackHandle plus user/device buffers, register our ports, and install
// the process/xrun/shutdown callbacks. Returns false (via the elided
// error path) on any failure.
bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // Look for jack server and try to become a client (only do once per stream).
  jack_client_t *client = 0;
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
    jack_status_t *status = NULL;
    // Use the caller-supplied stream name for the client if given.
    if ( options && !options->streamName.empty() )
      client = jack_client_open( options->streamName.c_str(), jackoptions, status );
      client = jack_client_open( "RtApiJack", jackoptions, status );
    if ( client == 0 ) {
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
      error( RtAudioError::WARNING );
  // The handle must have been created on an earlier pass.
  client = handle->client;
  std::string port, previousPort, deviceName;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
  // Parse the port names up to the first colon (:).
  port = (char *) ports[ nPorts ];
  iColon = port.find(":");
  if ( iColon != std::string::npos ) {
    port = port.substr( 0, iColon );
    if ( port != previousPort ) {
      // The device'th distinct prefix names the JACK client to use.
      if ( nDevices == device ) deviceName = port;
      previousPort = port;
  } while ( ports[++nPorts] );
  if ( device >= nDevices ) {
    errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
  // JACK port direction is mirrored: we write into the device's input
  // ports and read from its output ports.
  unsigned long flag = JackPortIsInput;
  if ( mode == INPUT ) flag = JackPortIsOutput;
  if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
    // Count the available ports containing the client name as device
    // channels. Jack "input ports" equal RtAudio output channels.
    unsigned int nChannels = 0;
    ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
    while ( ports[ nChannels ] ) nChannels++;
    // Compare the jack ports for specified client to the requested number of channels.
    if ( nChannels < (channels + firstChannel) ) {
      errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
      errorText_ = errorStream_.str();
  // Check the jack server sample rate. JACK's rate is fixed server-wide,
  // so a mismatch is a hard failure rather than a resample.
  unsigned int jackRate = jack_get_sample_rate( client );
  if ( sampleRate != jackRate ) {
    jack_client_close( client );
    errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
    errorText_ = errorStream_.str();
  stream_.sampleRate = jackRate;
  // Get the latency of the JACK port.
  ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
  if ( ports[ firstChannel ] ) {
    jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
    // the range (usually the min and max are equal)
    jack_latency_range_t latrange; latrange.min = latrange.max = 0;
    // get the latency range
    jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
    // be optimistic, use the min!
    stream_.latency[mode] = latrange.min;
    //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
  // The jack server always uses 32-bit floating-point data.
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
  stream_.userFormat = format;
  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;
  // Jack always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;
  // Jack always provides host byte-ordered data.
  stream_.doByteSwap[mode] = false;
  // Get the buffer size. The buffer size and number of buffers
  // (periods) is set when the jack server is started.
  stream_.bufferSize = (int) jack_get_buffer_size( client );
  *bufferSize = stream_.bufferSize;
  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  // Set flags for buffer conversion: needed on a format mismatch, or an
  // interleaving mismatch with more than one channel.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;
  // Allocate our JackHandle structure for the stream.
  if ( handle == 0 ) {
    handle = new JackHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
    if ( pthread_cond_init(&handle->condition, NULL) ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    handle->client = client;
  handle->deviceName[mode] = deviceName;
  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
  if ( stream_.doConvertBuffer[mode] ) {
    bool makeBuffer = true;
    if ( mode == OUTPUT )
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
    else { // mode == INPUT
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
      // In duplex mode, reuse the existing device buffer if it is
      // already large enough for the bigger direction.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
        if ( bufferBytes < bytesOut ) makeBuffer = false;
    bufferBytes *= *bufferSize;
    if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
    stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
    if ( stream_.deviceBuffer == NULL ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
  // Allocate memory for the Jack ports (channels) identifiers.
  handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
  if ( handle->ports[mode] == NULL )  {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
  stream_.device[mode] = device;
  stream_.channelOffset[mode] = firstChannel;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;
  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up the stream for output.
    stream_.mode = DUPLEX;
    stream_.mode = mode;
    jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
    jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
    jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
  // Register our ports.
  if ( mode == OUTPUT ) {
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      snprintf( label, 64, "outport %d", i );
      handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      snprintf( label, 64, "inport %d", i );
      handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
  // Setup the buffer conversion information structure. We don't use
  // buffers to do channel offsets, so we override that parameter
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
  if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
  // Error path: undo everything allocated above.
  pthread_cond_destroy( &handle->condition );
  jack_client_close( handle->client );
  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );
  stream_.apiHandle = 0;
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
// Close the stream: deactivate and close the JACK client, destroy the
// condition variable, and free the handle, port arrays, and all
// user/device buffers. Resets stream mode/state to closed.
void RtApiJack :: closeStream( void )
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiJack::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // A running stream must be deactivated before the client is closed.
  if ( stream_.state == STREAM_RUNNING )
    jack_deactivate( handle->client );
  jack_client_close( handle->client );
  if ( handle->ports[0] ) free( handle->ports[0] );
  if ( handle->ports[1] ) free( handle->ports[1] );
  pthread_cond_destroy( &handle->condition );
  stream_.apiHandle = 0;
  // Free the per-direction user buffers.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;
  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and, unless auto-connect
// was disabled at open time, wire our registered ports to the target
// device's ports (honoring the channel offset chosen at open).
void RtApiJack :: startStream( void )
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );
#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference for getStreamTime().
  gettimeofday( &stream_.lastTickTimestamp, NULL );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
  errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
    // Now make the port connections. Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
      errorText_ = "RtApiJack::startStream(): error connecting output ports!";
  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
    // Now make the port connections. See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
      errorText_ = "RtApiJack::startStream(): error connecting input ports!";
  // Reset drain bookkeeping before the callback starts firing.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;
  if ( result == 0 ) return;
  // Fall through: some step above failed.
  error( RtAudioError::SYSTEM_ERROR );
// Stop the stream. For playback, arm the drain counter and wait on the
// handle's condition variable until the callback signals that output
// has drained, then deactivate the JACK client.
void RtApiJack :: stopStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      // NOTE(review): POSIX requires stream_.mutex to be locked by the
      // caller of pthread_cond_wait — no lock is visible in this
      // function; verify against the full source.
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
// Abort the stream: like stopStream(), but set drainCounter directly
// so pending output is NOT drained before stopping.
void RtApiJack :: abortStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  handle->drainCounter = 2;
2612 // This function will be called by a spawned thread when the user
2613 // callback function signals that the stream should be stopped or
2614 // aborted. It is necessary to handle it this way because the
2615 // callbackEvent() function must return before the jack_deactivate()
2616 // function will return.
2617 static void *jackStopStream( void *ptr )
2619 CallbackInfo *info = (CallbackInfo *) ptr;
2620 RtApiJack *object = (RtApiJack *) info->object;
2622 object->stopStream();
2623 pthread_exit( NULL );
// Per-buffer JACK processing: invoke the user callback, then move
// audio between the user/device buffers and the per-channel JACK port
// buffers (with format conversion when doConvertBuffer is set).
// Returns SUCCESS to keep the stream running.
bool RtApiJack :: callbackEvent( unsigned long nframes )
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    // NOTE(review): message says "RtApiCore" but this is
    // RtApiJack::callbackEvent — upstream copy/paste error in the text.
    errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );
  if ( stream_.bufferSize != nframes ) {
    // NOTE(review): same "RtApiCore" copy/paste error as above.
    errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
    error( RtAudioError::WARNING );
  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  // Check if we were draining the stream and signal is finished.
  if ( handle->drainCounter > 3 ) {
    ThreadHandle threadId;
    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == true )
      // Drain was started from the callback itself: stop on a thread.
      pthread_create( &threadId, NULL, jackStopStream, info );
      // Otherwise stopStream() is blocked on the condition variable.
      pthread_cond_signal( &handle->condition );
  // Invoke user callback first, to get fresh output data.
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report and clear any xrun flags latched by jackXrun().
    if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;
      handle->xrun[0] = false;
    if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;
      handle->xrun[1] = false;
    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Callback return 2 = abort immediately; 1 = drain then stop.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;
      pthread_create( &id, NULL, jackStopStream, info );
    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;
  jack_default_audio_sample_t *jackbuffer;
  unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter > 1 ) { // write zeros to the output stream
      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memset( jackbuffer, 0, bufferBytes );
    else if ( stream_.doConvertBuffer[0] ) {
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
        memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
    if ( stream_.doConvertBuffer[1] ) {
      for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
    else { // no buffer conversion
      for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
        jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
        memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
  // Advance the stream-time clock by one buffer.
  RtApi::tickStreamTime();
2739 //******************** End of __UNIX_JACK__ *********************//
2742 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2744 // The ASIO API is designed around a callback scheme, so this
2745 // implementation is similar to that used for OS-X CoreAudio and Linux
2746 // Jack. The primary constraint with ASIO is that it only allows
2747 // access to a single driver at a time. Thus, it is not possible to
2748 // have more than one simultaneous RtAudio stream.
2750 // This implementation also requires a number of external ASIO files
2751 // and a few global variables. The ASIO callback scheme does not
2752 // allow for the passing of user data, so we must create a global
2753 // pointer to our callbackInfo structure.
2755 // On unix systems, we make use of a pthread condition variable.
2756 // Since there is no equivalent in Windows, I hacked something based
2757 // on information found in
2758 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2760 #include "asiosys.h"
2762 #include "iasiothiscallresolver.h"
2763 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO callback scheme carries no user-data
// pointer, so this backend keeps its driver list, callback table, and
// CallbackInfo pointer in globals (one stream at a time only).
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;
static bool asioXRun;
// Members of the ASIO stream-handle struct (struct header is above,
// outside this excerpt).
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.
  ASIOBufferInfo *bufferInfos;
  :drainCounter(0), internalDrain(false), bufferInfos(0) {}
// Function declarations (definitions at end of section)
static const char* getAsioErrorString( ASIOError result );
static void sampleRateChanged( ASIOSampleRate sRate );
static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (ASIO drivers require apartment
// threading) and reset the global driver bookkeeping.
RtApiAsio :: RtApiAsio()
  // ASIO cannot run on a multi-threaded apartment. You can call
  // CoInitialize beforehand, but it must be for apartment threading
  // (in which case, CoInitialize will return S_FALSE here).
  coInitialized_ = false;
  HRESULT hr = CoInitialize( NULL );
  errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
  error( RtAudioError::WARNING );
  // Remember we own the COM init so the destructor can undo it.
  coInitialized_ = true;
  drivers.removeCurrentDriver();
  driverInfo.asioVersion = 2;
  // See note in DirectSound implementation about GetDesktopWindow().
  driverInfo.sysRef = GetForegroundWindow();
2807 RtApiAsio :: ~RtApiAsio()
2809 if ( stream_.state != STREAM_CLOSED ) closeStream();
2810 if ( coInitialized_ ) CoUninitialize();
2813 unsigned int RtApiAsio :: getDeviceCount( void )
2815 return (unsigned int) drivers.asioGetNumDev();
// Probes a single ASIO device (driver) and fills in an RtAudio::DeviceInfo:
// channel counts, supported sample rates, native data format and default
// input/output status. Because ASIO only allows one driver to be loaded at a
// time, probing is skipped while a stream is open and cached results from
// saveDeviceInfo() are returned instead.
// NOTE(review): error paths here fall through to elided lines (likely early
// returns) not visible in this listing.
2818 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2820 RtAudio::DeviceInfo info;
2821 info.probed = false;
// Validate the device index against the current driver count.
2824 unsigned int nDevices = getDeviceCount();
2825 if ( nDevices == 0 ) {
2826 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2827 error( RtAudioError::INVALID_USE );
2831 if ( device >= nDevices ) {
2832 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2833 error( RtAudioError::INVALID_USE );
2837 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2838 if ( stream_.state != STREAM_CLOSED ) {
2839 if ( device >= devices_.size() ) {
2840 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2841 error( RtAudioError::WARNING );
2844 return devices_[ device ];
// Look up the driver name for this device index, then load and init it.
2847 char driverName[32];
2848 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2849 if ( result != ASE_OK ) {
2850 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2851 errorText_ = errorStream_.str();
2852 error( RtAudioError::WARNING );
2856 info.name = driverName;
2858 if ( !drivers.loadDriver( driverName ) ) {
2859 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2860 errorText_ = errorStream_.str();
2861 error( RtAudioError::WARNING );
2865 result = ASIOInit( &driverInfo );
2866 if ( result != ASE_OK ) {
2867 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2868 errorText_ = errorStream_.str();
2869 error( RtAudioError::WARNING );
2873 // Determine the device channel information.
2874 long inputChannels, outputChannels;
2875 result = ASIOGetChannels( &inputChannels, &outputChannels );
2876 if ( result != ASE_OK ) {
// Unload the driver before reporting the failure.
2877 drivers.removeCurrentDriver();
2878 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2879 errorText_ = errorStream_.str();
2880 error( RtAudioError::WARNING );
2884 info.outputChannels = outputChannels;
2885 info.inputChannels = inputChannels;
// Duplex capability is limited by the smaller of the two channel counts.
2886 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2887 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2889 // Determine the supported sample rates.
2890 info.sampleRates.clear();
2891 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2892 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2893 if ( result == ASE_OK ) {
2894 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2896 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2897 info.preferredSampleRate = SAMPLE_RATES[i];
2901 // Determine supported data types ... just check first channel and assume rest are the same.
2902 ASIOChannelInfo channelInfo;
2903 channelInfo.channel = 0;
2904 channelInfo.isInput = true;
2905 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2906 result = ASIOGetChannelInfo( &channelInfo );
2907 if ( result != ASE_OK ) {
2908 drivers.removeCurrentDriver();
2909 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2910 errorText_ = errorStream_.str();
2911 error( RtAudioError::WARNING );
// Map the ASIO sample type onto the corresponding RtAudio format flag.
2915 info.nativeFormats = 0;
2916 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2917 info.nativeFormats |= RTAUDIO_SINT16;
2918 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2919 info.nativeFormats |= RTAUDIO_SINT32;
2920 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2921 info.nativeFormats |= RTAUDIO_FLOAT32;
2922 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2923 info.nativeFormats |= RTAUDIO_FLOAT64;
2924 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2925 info.nativeFormats |= RTAUDIO_SINT24;
// Flag default devices based on the API-level default queries.
2927 if ( info.outputChannels > 0 )
2928 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2929 if ( info.inputChannels > 0 )
2930 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Release the probed driver before returning.
2933 drivers.removeCurrentDriver();
// ASIO buffer-switch callback: invoked by the driver when a half-buffer is
// ready. Recovers the RtApiAsio instance from the global asioCallbackInfo
// and forwards to its callbackEvent() with the active buffer index.
2937 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2939 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2940 object->callbackEvent( index );
// Snapshots DeviceInfo for every device into devices_. Called before a
// stream is opened, because getDeviceInfo() cannot probe other drivers
// while an ASIO stream is running and serves these cached results instead.
2943 void RtApiAsio :: saveDeviceInfo( void )
2947 unsigned int nDevices = getDeviceCount();
2948 devices_.resize( nDevices );
2949 for ( unsigned int i=0; i<nDevices; i++ )
2950 devices_[i] = getDeviceInfo( i );
// Opens (one direction of) an ASIO stream on the given device: loads and
// initializes the driver (unless this is the input half of a duplex stream,
// which must reuse the already-loaded output driver), validates channel
// counts and sample rate, determines the driver's native data format,
// negotiates a buffer size within the driver's [min, max] range, creates the
// ASIO buffers and internal conversion buffers, and registers the ASIO
// callbacks. Error paths (largely elided in this listing) clean up only for
// the non-duplex-input case; duplex-input cleanup is handled by
// RtApi::openStream().
//
// FIX(review): line 3032 contained mojibake "¤tRate" — the "&curren" of
// "&currentRate" had been swallowed as the HTML entity for '¤'. Restored to
// ASIOGetSampleRate( &currentRate ).
2953 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2954 unsigned int firstChannel, unsigned int sampleRate,
2955 RtAudioFormat format, unsigned int *bufferSize,
2956 RtAudio::StreamOptions *options )
2957 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2959 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2961 // For ASIO, a duplex stream MUST use the same driver.
2962 if ( isDuplexInput && stream_.device[0] != device ) {
2963 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2967 char driverName[32];
2968 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2971 errorText_ = errorStream_.str();
2975 // Only load the driver once for duplex stream.
2976 if ( !isDuplexInput ) {
2977 // The getDeviceInfo() function will not work when a stream is open
2978 // because ASIO does not allow multiple devices to run at the same
2979 // time. Thus, we'll probe the system before opening a stream and
2980 // save the results for use by getDeviceInfo().
2981 this->saveDeviceInfo();
2983 if ( !drivers.loadDriver( driverName ) ) {
2984 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2985 errorText_ = errorStream_.str();
2989 result = ASIOInit( &driverInfo );
2990 if ( result != ASE_OK ) {
2991 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2992 errorText_ = errorStream_.str();
2997 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2998 bool buffersAllocated = false;
2999 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3000 unsigned int nChannels;
3003 // Check the device channel count.
3004 long inputChannels, outputChannels;
3005 result = ASIOGetChannels( &inputChannels, &outputChannels );
3006 if ( result != ASE_OK ) {
3007 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3008 errorText_ = errorStream_.str();
3012 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3013 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3014 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3015 errorText_ = errorStream_.str();
3018 stream_.nDeviceChannels[mode] = channels;
3019 stream_.nUserChannels[mode] = channels;
3020 stream_.channelOffset[mode] = firstChannel;
3022 // Verify the sample rate is supported.
3023 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3024 if ( result != ASE_OK ) {
3025 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3026 errorText_ = errorStream_.str();
3030 // Get the current sample rate
3031 ASIOSampleRate currentRate;
3032 result = ASIOGetSampleRate( &currentRate );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3035 errorText_ = errorStream_.str();
3039 // Set the sample rate only if necessary
3040 if ( currentRate != sampleRate ) {
3041 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3042 if ( result != ASE_OK ) {
3043 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3044 errorText_ = errorStream_.str();
3049 // Determine the driver data type.
3050 ASIOChannelInfo channelInfo;
3051 channelInfo.channel = 0;
3052 if ( mode == OUTPUT ) channelInfo.isInput = false;
3053 else channelInfo.isInput = true;
3054 result = ASIOGetChannelInfo( &channelInfo );
3055 if ( result != ASE_OK ) {
3056 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3057 errorText_ = errorStream_.str();
3061 // Assuming WINDOWS host is always little-endian.
3062 stream_.doByteSwap[mode] = false;
3063 stream_.userFormat = format;
3064 stream_.deviceFormat[mode] = 0;
3065 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3066 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3067 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3069 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3070 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3071 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3073 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3074 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3075 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3077 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3078 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3079 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3081 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3082 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3083 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3086 if ( stream_.deviceFormat[mode] == 0 ) {
3087 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3088 errorText_ = errorStream_.str();
3092 // Set the buffer size. For a duplex stream, this will end up
3093 // setting the buffer size based on the input constraints, which
3095 long minSize, maxSize, preferSize, granularity;
3096 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3097 if ( result != ASE_OK ) {
3098 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3099 errorText_ = errorStream_.str();
3103 if ( isDuplexInput ) {
3104 // When this is the duplex input (output was opened before), then we have to use the same
3105 // buffersize as the output, because it might use the preferred buffer size, which most
3106 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3107 // So instead of throwing an error, make them equal. The caller uses the reference
3108 // to the "bufferSize" param as usual to set up processing buffers.
3110 *bufferSize = stream_.bufferSize;
// Clamp the requested buffer size into the driver's supported range; when
// granularity is -1 the driver requires power-of-two sizes, so snap to the
// nearest power of two within [minSize, maxSize].
3113 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3114 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3115 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3116 else if ( granularity == -1 ) {
3117 // Make sure bufferSize is a power of two.
3118 int log2_of_min_size = 0;
3119 int log2_of_max_size = 0;
3121 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3122 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3123 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3126 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3127 int min_delta_num = log2_of_min_size;
3129 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3130 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3131 if (current_delta < min_delta) {
3132 min_delta = current_delta;
3137 *bufferSize = ( (unsigned int)1 << min_delta_num );
3138 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3139 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3141 else if ( granularity != 0 ) {
3142 // Set to an even multiple of granularity, rounding up.
3143 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3148 // we don't use it anymore, see above!
3149 // Just left it here for the case...
3150 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3151 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3156 stream_.bufferSize = *bufferSize;
3157 stream_.nBuffers = 2;
3159 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3160 else stream_.userInterleaved = true;
3162 // ASIO always uses non-interleaved buffers.
3163 stream_.deviceInterleaved[mode] = false;
3165 // Allocate, if necessary, our AsioHandle structure for the stream.
3166 if ( handle == 0 ) {
3168 handle = new AsioHandle;
3170 catch ( std::bad_alloc& ) {
3171 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3174 handle->bufferInfos = 0;
3176 // Create a manual-reset event.
3177 handle->condition = CreateEvent( NULL, // no security
3178 TRUE, // manual-reset
3179 FALSE, // non-signaled initially
3181 stream_.apiHandle = (void *) handle;
3184 // Create the ASIO internal buffers. Since RtAudio sets up input
3185 // and output separately, we'll have to dispose of previously
3186 // created output buffers for a duplex stream.
3187 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3188 ASIODisposeBuffers();
3189 if ( handle->bufferInfos ) free( handle->bufferInfos );
3192 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3194 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3195 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3196 if ( handle->bufferInfos == NULL ) {
3197 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3198 errorText_ = errorStream_.str();
// Output channel infos are listed first, then input channel infos.
3202 ASIOBufferInfo *infos;
3203 infos = handle->bufferInfos;
3204 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3205 infos->isInput = ASIOFalse;
3206 infos->channelNum = i + stream_.channelOffset[0];
3207 infos->buffers[0] = infos->buffers[1] = 0;
3209 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3210 infos->isInput = ASIOTrue;
3211 infos->channelNum = i + stream_.channelOffset[1];
3212 infos->buffers[0] = infos->buffers[1] = 0;
3215 // prepare for callbacks
3216 stream_.sampleRate = sampleRate;
3217 stream_.device[mode] = device;
3218 stream_.mode = isDuplexInput ? DUPLEX : mode;
3220 // store this class instance before registering callbacks, that are going to use it
3221 asioCallbackInfo = &stream_.callbackInfo;
3222 stream_.callbackInfo.object = (void *) this;
3224 // Set up the ASIO callback structure and create the ASIO data buffers.
3225 asioCallbacks.bufferSwitch = &bufferSwitch;
3226 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3227 asioCallbacks.asioMessage = &asioMessages;
3228 asioCallbacks.bufferSwitchTimeInfo = NULL;
3229 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3230 if ( result != ASE_OK ) {
3231 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3232 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3233 // In that case, let's be naïve and try that instead.
3234 *bufferSize = preferSize;
3235 stream_.bufferSize = *bufferSize;
3236 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3239 if ( result != ASE_OK ) {
3240 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3241 errorText_ = errorStream_.str();
3244 buffersAllocated = true;
3245 stream_.state = STREAM_STOPPED;
3247 // Set flags for buffer conversion.
3248 stream_.doConvertBuffer[mode] = false;
3249 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3250 stream_.doConvertBuffer[mode] = true;
3251 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3252 stream_.nUserChannels[mode] > 1 )
3253 stream_.doConvertBuffer[mode] = true;
3255 // Allocate necessary internal buffers
3256 unsigned long bufferBytes;
3257 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3258 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3259 if ( stream_.userBuffer[mode] == NULL ) {
3260 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3264 if ( stream_.doConvertBuffer[mode] ) {
3266 bool makeBuffer = true;
3267 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3268 if ( isDuplexInput && stream_.deviceBuffer ) {
// Reuse the existing (output-sized) device buffer when it is large enough.
3269 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3270 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3274 bufferBytes *= *bufferSize;
3275 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3276 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3277 if ( stream_.deviceBuffer == NULL ) {
3278 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3284 // Determine device latencies
3285 long inputLatency, outputLatency;
3286 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3287 if ( result != ASE_OK ) {
3288 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3289 errorText_ = errorStream_.str();
3290 error( RtAudioError::WARNING); // warn but don't fail
3293 stream_.latency[0] = outputLatency;
3294 stream_.latency[1] = inputLatency;
3297 // Setup the buffer conversion information structure. We don't use
3298 // buffers to do channel offsets, so we override that parameter
3300 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error-cleanup path (reached via elided "goto error" labels above): for a
// non-duplex-input open, dispose ASIO buffers, unload the driver and free
// the handle and conversion buffers. Duplex-input errors are cleaned up by
// RtApi::openStream().
3305 if ( !isDuplexInput ) {
3306 // the cleanup for error in the duplex input, is done by RtApi::openStream
3307 // So we clean up for single channel only
3309 if ( buffersAllocated )
3310 ASIODisposeBuffers();
3312 drivers.removeCurrentDriver();
3315 CloseHandle( handle->condition );
3316 if ( handle->bufferInfos )
3317 free( handle->bufferInfos );
3320 stream_.apiHandle = 0;
3324 if ( stream_.userBuffer[mode] ) {
3325 free( stream_.userBuffer[mode] );
3326 stream_.userBuffer[mode] = 0;
3329 if ( stream_.deviceBuffer ) {
3330 free( stream_.deviceBuffer );
3331 stream_.deviceBuffer = 0;
3336 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Closes the open stream: stops it if running, disposes the ASIO buffers,
// unloads the current driver, and frees the AsioHandle plus all user/device
// buffers. Resets stream state to UNINITIALIZED / STREAM_CLOSED.
3338 void RtApiAsio :: closeStream()
3340 if ( stream_.state == STREAM_CLOSED ) {
3341 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3342 error( RtAudioError::WARNING );
// Mark as stopped first so the callback path stops processing.
3346 if ( stream_.state == STREAM_RUNNING ) {
3347 stream_.state = STREAM_STOPPED;
3350 ASIODisposeBuffers();
3351 drivers.removeCurrentDriver();
// Release the API handle (Win32 event + bufferInfos array).
3353 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3355 CloseHandle( handle->condition );
3356 if ( handle->bufferInfos )
3357 free( handle->bufferInfos );
3359 stream_.apiHandle = 0;
// Free both the output [0] and input [1] user buffers.
3362 for ( int i=0; i<2; i++ ) {
3363 if ( stream_.userBuffer[i] ) {
3364 free( stream_.userBuffer[i] );
3365 stream_.userBuffer[i] = 0;
3369 if ( stream_.deviceBuffer ) {
3370 free( stream_.deviceBuffer );
3371 stream_.deviceBuffer = 0;
3374 stream_.mode = UNINITIALIZED;
3375 stream_.state = STREAM_CLOSED;
// File-scope flag cleared by startStream(); presumably set by the stop path
// to avoid spawning duplicate stop threads — the setter is not visible in
// this listing (TODO confirm against full source).
3378 bool stopThreadCalled = false;
// Starts the ASIO device. On success, resets the drain bookkeeping and the
// stop condition event and marks the stream RUNNING; on failure, reports a
// SYSTEM_ERROR via error().
3380 void RtApiAsio :: startStream()
3383 if ( stream_.state == STREAM_RUNNING ) {
3384 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3385 error( RtAudioError::WARNING );
// Record the stream start time for getStreamTime() bookkeeping.
3389 #if defined( HAVE_GETTIMEOFDAY )
3390 gettimeofday( &stream_.lastTickTimestamp, NULL );
3393 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3394 ASIOError result = ASIOStart();
3395 if ( result != ASE_OK ) {
3396 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3397 errorText_ = errorStream_.str();
3401 handle->drainCounter = 0;
3402 handle->internalDrain = false;
// Re-arm the manual-reset event used to signal stream drain completion.
3403 ResetEvent( handle->condition );
3404 stream_.state = STREAM_RUNNING;
3408 stopThreadCalled = false;
3410 if ( result == ASE_OK ) return;
3411 error( RtAudioError::SYSTEM_ERROR );
// Stops the stream, first draining pending output: for output/duplex modes
// it sets drainCounter and blocks on the handle's condition event until the
// callback signals the drain is complete, then calls ASIOStop().
3414 void RtApiAsio :: stopStream()
3417 if ( stream_.state == STREAM_STOPPED ) {
3418 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3419 error( RtAudioError::WARNING );
3423 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3424 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3425 if ( handle->drainCounter == 0 ) {
// drainCounter = 2 tells callbackEvent() to output silence until drained.
3426 handle->drainCounter = 2;
3427 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3431 stream_.state = STREAM_STOPPED;
3433 ASIOError result = ASIOStop();
3434 if ( result != ASE_OK ) {
3435 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3436 errorText_ = errorStream_.str();
3439 if ( result == ASE_OK ) return;
3440 error( RtAudioError::SYSTEM_ERROR );
// Aborts the stream. Deliberately identical to stopStream() (the immediate-
// abort path is commented out): see the note below about residual sound when
// buffers are not zeroed before disposal.
3443 void RtApiAsio :: abortStream()
3446 if ( stream_.state == STREAM_STOPPED ) {
3447 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3448 error( RtAudioError::WARNING );
3452 // The following lines were commented-out because some behavior was
3453 // noted where the device buffers need to be zeroed to avoid
3454 // continuing sound, even when the device buffers are completely
3455 // disposed. So now, calling abort is the same as calling stop.
3456 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3457 // handle->drainCounter = 2;
3461 // This function will be called by a spawned thread when the user
3462 // callback function signals that the stream should be stopped or
3463 // aborted. It is necessary to handle it this way because the
3464 // callbackEvent() function must return before the ASIOStop()
3465 // function will return.
// Thread entry point (Win32 _beginthreadex signature): recovers the
// RtApiAsio instance from the CallbackInfo and calls stopStream() on it.
3466 static unsigned __stdcall asioStopStream( void *ptr )
3468 CallbackInfo *info = (CallbackInfo *) ptr;
3469 RtApiAsio *object = (RtApiAsio *) info->object;
3471 object->stopStream();
// Core per-buffer processing, invoked from the ASIO bufferSwitch callback
// with the half-buffer index to use. Handles drain signaling, invokes the
// user callback for fresh output data, and moves audio between the user
// buffers and the driver's per-channel non-interleaved buffers (with format
// conversion and byte swapping as configured at open time).
// NOTE(review): several original lines are elided in this listing; comments
// below describe only the visible statements.
3476 bool RtApiAsio :: callbackEvent( long bufferIndex )
3478 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3479 if ( stream_.state == STREAM_CLOSED ) {
3480 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3481 error( RtAudioError::WARNING );
3485 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3486 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3488 // Check if we were draining the stream and signal if finished.
3489 if ( handle->drainCounter > 3 ) {
3491 stream_.state = STREAM_STOPPING;
// External drain (stopStream() is blocked on the event): signal it.
3492 if ( handle->internalDrain == false )
3493 SetEvent( handle->condition );
3494 else { // spawn a thread to stop the stream
3496 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3497 &stream_.callbackInfo, 0, &threadId );
3502 // Invoke user callback to get fresh output data UNLESS we are
3504 if ( handle->drainCounter == 0 ) {
3505 RtAudioCallback callback = (RtAudioCallback) info->callback;
3506 double streamTime = getStreamTime();
3507 RtAudioStreamStatus status = 0;
// Report any xrun flagged by the driver since the last callback.
3508 if ( stream_.mode != INPUT && asioXRun == true ) {
3509 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3512 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3513 status |= RTAUDIO_INPUT_OVERFLOW;
3516 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3517 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain then stop.
3518 if ( cbReturnValue == 2 ) {
3519 stream_.state = STREAM_STOPPING;
3520 handle->drainCounter = 2;
3522 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3523 &stream_.callbackInfo, 0, &threadId );
3526 else if ( cbReturnValue == 1 ) {
3527 handle->drainCounter = 1;
3528 handle->internalDrain = true;
// ---- Output side: copy/convert user data into the driver's channel buffers.
3532 unsigned int nChannels, bufferBytes, i, j;
3533 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3534 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3536 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3538 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3540 for ( i=0, j=0; i<nChannels; i++ ) {
3541 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3542 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Conversion path: user format/interleaving differs from the device's.
3546 else if ( stream_.doConvertBuffer[0] ) {
3548 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3549 if ( stream_.doByteSwap[0] )
3550 byteSwapBuffer( stream_.deviceBuffer,
3551 stream_.bufferSize * stream_.nDeviceChannels[0],
3552 stream_.deviceFormat[0] );
3554 for ( i=0, j=0; i<nChannels; i++ ) {
3555 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3556 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3557 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// Direct path: user buffer already matches the device layout.
3563 if ( stream_.doByteSwap[0] )
3564 byteSwapBuffer( stream_.userBuffer[0],
3565 stream_.bufferSize * stream_.nUserChannels[0],
3566 stream_.userFormat );
3568 for ( i=0, j=0; i<nChannels; i++ ) {
3569 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3570 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3571 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3577 // Don't bother draining input
3578 if ( handle->drainCounter ) {
3579 handle->drainCounter++;
// ---- Input side: copy/convert the driver's channel buffers into user data.
3583 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3585 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3587 if (stream_.doConvertBuffer[1]) {
3589 // Always interleave ASIO input data.
3590 for ( i=0, j=0; i<nChannels; i++ ) {
3591 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3592 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3593 handle->bufferInfos[i].buffers[bufferIndex],
3597 if ( stream_.doByteSwap[1] )
3598 byteSwapBuffer( stream_.deviceBuffer,
3599 stream_.bufferSize * stream_.nDeviceChannels[1],
3600 stream_.deviceFormat[1] );
3601 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3605 for ( i=0, j=0; i<nChannels; i++ ) {
3606 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3607 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3608 handle->bufferInfos[i].buffers[bufferIndex],
3613 if ( stream_.doByteSwap[1] )
3614 byteSwapBuffer( stream_.userBuffer[1],
3615 stream_.bufferSize * stream_.nUserChannels[1],
3616 stream_.userFormat );
3621 // The following call was suggested by Malte Clasen. While the API
3622 // documentation indicates it should not be required, some device
3623 // drivers apparently do not function correctly without it.
// Advance the stream-time counter by one buffer period.
3626 RtApi::tickStreamTime();
// ASIO sampleRateDidChange callback: the driver reports an (external-sync)
// sample rate change. The stream is stopped via the global asioCallbackInfo
// object, since RtAudio cannot continue at an unexpected rate.
3630 static void sampleRateChanged( ASIOSampleRate sRate )
3632 // The ASIO documentation says that this usually only happens during
3633 // external sync. Audio processing is not stopped by the driver,
3634 // actual sample rate might not have even changed, maybe only the
3635 // sample rate status of an AES/EBU or S/PDIF digital input at the
3638 RtApi *object = (RtApi *) asioCallbackInfo->object;
3640 object->stopStream();
// stopStream() may throw; report rather than propagate out of the driver callback.
3642 catch ( RtAudioError &exception ) {
3643 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3647 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO asioMessage callback: answers driver queries about host capabilities
// and reacts to driver notifications (reset/resync/latency changes). Return
// values for each selector are set on elided lines not visible here.
3650 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3654 switch( selector ) {
3655 case kAsioSelectorSupported:
3656 if ( value == kAsioResetRequest
3657 || value == kAsioEngineVersion
3658 || value == kAsioResyncRequest
3659 || value == kAsioLatenciesChanged
3660 // The following three were added for ASIO 2.0, you don't
3661 // necessarily have to support them.
3662 || value == kAsioSupportsTimeInfo
3663 || value == kAsioSupportsTimeCode
3664 || value == kAsioSupportsInputMonitor)
3667 case kAsioResetRequest:
3668 // Defer the task and perform the reset of the driver during the
3669 // next "safe" situation. You cannot reset the driver right now,
3670 // as this code is called from the driver. Reset the driver is
3671 // done by completely destruct is. I.e. ASIOStop(),
3672 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3674 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3677 case kAsioResyncRequest:
3678 // This informs the application that the driver encountered some
3679 // non-fatal data loss. It is used for synchronization purposes
3680 // of different media. Added mainly to work around the Win16Mutex
3681 // problems in Windows 95/98 with the Windows Multimedia system,
3682 // which could lose data because the Mutex was held too long by
3683 // another thread. However a driver can issue it in other
3685 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3689 case kAsioLatenciesChanged:
3690 // This will inform the host application that the drivers were
3691 // latencies changed. Beware, it this does not mean that the
3692 // buffer sizes have changed! You might need to update internal
3694 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3697 case kAsioEngineVersion:
3698 // Return the supported ASIO version of the host application. If
3699 // a host application does not implement this selector, ASIO 1.0
3700 // is assumed by the driver.
3703 case kAsioSupportsTimeInfo:
3704 // Informs the driver whether the
3705 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3706 // For compatibility with ASIO 1.0 drivers the host application
3707 // should always support the "old" bufferSwitch method, too.
3710 case kAsioSupportsTimeCode:
3711 // Informs the driver whether application is interested in time
3712 // code info. If an application does not need to know about time
3713 // code, the driver has less work to do.
// Maps an ASIOError code to a human-readable description via a small static
// lookup table; unknown codes yield "Unknown error.". (The Messages struct
// declaration is on elided lines above the table.)
3720 static const char* getAsioErrorString( ASIOError result )
3728 static const Messages m[] =
3730 { ASE_NotPresent, "Hardware input or output is not present or available." },
3731 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3732 { ASE_InvalidParameter, "Invalid input parameter." },
3733 { ASE_InvalidMode, "Invalid mode." },
3734 { ASE_SPNotAdvancing, "Sample position not advancing." },
3735 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3736 { ASE_NoMemory, "Not enough memory to complete the request." }
// Linear scan is fine: the table has only seven entries.
3739 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3740 if ( m[i].value == result ) return m[i].message;
3742 return "Unknown error.";
3745 //******************** End of __WINDOWS_ASIO__ *********************//
3749 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3751 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3752 // - Introduces support for the Windows WASAPI API
3753 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3754 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3755 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3762 #include <mferror.h>
3764 #include <mftransform.h>
3765 #include <wmcodecdsp.h>
3767 #include <audioclient.h>
3769 #include <mmdeviceapi.h>
3770 #include <functiondiscoverykeys_devpkey.h>
3772 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3773 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3776 #ifndef MFSTARTUP_NOSOCKET
3777 #define MFSTARTUP_NOSOCKET 0x1
3781 #pragma comment( lib, "ksuser" )
3782 #pragma comment( lib, "mfplat.lib" )
3783 #pragma comment( lib, "mfuuid.lib" )
3784 #pragma comment( lib, "wmcodecdspuuid" )
3787 //=============================================================================
3789 #define SAFE_RELEASE( objectPtr )\
3792 objectPtr->Release();\
3796 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3798 //-----------------------------------------------------------------------------
3800 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3801 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3802 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3803 // provide intermediate storage for read / write synchronization.
3817 // sets the length of the internal ring buffer
// Allocates a zero-initialized ring buffer holding bufferSize samples of
// formatBytes bytes each.  NOTE(review): lines elided from this view likely
// free any previously allocated buffer_ first — confirm against full source.
3818 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3821 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3823 bufferSize_ = bufferSize;
3828 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies bufferSize samples (units of the given RtAudioFormat, not bytes)
// from `buffer` into the ring.  Returns false without writing anything when
// there is not enough free space between the "in" and "out" indices.
3829 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
// Reject degenerate inputs up front.
3831 if ( !buffer || // incoming buffer is NULL
3832 bufferSize == 0 || // incoming buffer has no data
3833 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below can be done with plain
// (non-modular) comparisons: if the write would wrap past the end of the
// ring while "out" sits before "in", shift "out" up by one full ring length.
3838 unsigned int relOutIndex = outIndex_;
3839 unsigned int inIndexEnd = inIndex_ + bufferSize;
3840 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3841 relOutIndex += bufferSize_;
3844 // "in" index can end on the "out" index but cannot begin at it
3845 if ( inIndex_ < relOutIndex && inIndexEnd > relOutIndex ) {
3846 return false; // not enough space between "in" index and "out" index
3849 // copy buffer from external to internal
// Split the copy in two when it wraps: fromInSize samples go at inIndex_,
// the remaining fromZeroSize samples wrap around to the start of the ring.
3850 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3851 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3852 int fromInSize = bufferSize - fromZeroSize;
// Per-format memcpy pairs: indices are in samples, so the pointer is cast to
// the sample type before indexing and sizes are scaled by sizeof(sample).
3857 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3858 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3860 case RTAUDIO_SINT16:
3861 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3862 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3864 case RTAUDIO_SINT24:
3865 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3866 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3868 case RTAUDIO_SINT32:
3869 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3870 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3872 case RTAUDIO_FLOAT32:
3873 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3874 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3876 case RTAUDIO_FLOAT64:
3877 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3878 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3882 // update "in" index
// Advance modulo the ring length so the index stays in [0, bufferSize_).
3883 inIndex_ += bufferSize;
3884 inIndex_ %= bufferSize_;
3889 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer(): copies bufferSize samples (units of the given
// RtAudioFormat) out of the ring into `buffer`.  Returns false without
// reading anything when fewer than bufferSize samples are available.
3890 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
// Reject degenerate inputs up front.
3892 if ( !buffer || // incoming buffer is NULL
3893 bufferSize == 0 || // incoming buffer has no data
3894 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so availability can be checked with plain
// comparisons when the read region wraps past the end of the ring.
3899 unsigned int relInIndex = inIndex_;
3900 unsigned int outIndexEnd = outIndex_ + bufferSize;
3901 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3902 relInIndex += bufferSize_;
3905 // "out" index can begin at and end on the "in" index
3906 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3907 return false; // not enough space between "out" index and "in" index
3910 // copy buffer from internal to external
// Split the copy in two when it wraps: fromOutSize samples come from
// outIndex_, the remaining fromZeroSize samples from the start of the ring.
3911 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3912 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3913 int fromOutSize = bufferSize - fromZeroSize;
// Per-format memcpy pairs; indices are in samples, scaled by sizeof(sample).
3918 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3919 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3921 case RTAUDIO_SINT16:
3922 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3923 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3925 case RTAUDIO_SINT24:
3926 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3927 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3929 case RTAUDIO_SINT32:
3930 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3931 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3933 case RTAUDIO_FLOAT32:
3934 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3935 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3937 case RTAUDIO_FLOAT64:
3938 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3939 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3943 // update "out" index
// Advance modulo the ring length so the index stays in [0, bufferSize_).
3944 outIndex_ += bufferSize;
3945 outIndex_ %= bufferSize_;
3952 unsigned int bufferSize_; // ring capacity, in samples (set by setBufferSize)
3953 unsigned int inIndex_; // write position ("in" index), in samples
3954 unsigned int outIndex_; // read position ("out" index), in samples
3957 //-----------------------------------------------------------------------------
3959 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3960 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3961 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Wraps the Windows Media Foundation CResamplerMediaObject MFT to convert
// audio between an input and an output sample rate.  Constructed per stream
// direction; Convert() is called repeatedly from the stream callback loop.
3962 class WasapiResampler
// Configure and start the resampler transform.
// isFloat/bitsPerSample/channelCount describe the (identical) sample format
// on both sides; only the sample rate differs between input and output.
3965 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3966 unsigned int inSampleRate, unsigned int outSampleRate )
3967 : _bytesPerSample( bitsPerSample / 8 )
3968 , _channelCount( channelCount )
3969 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3970 , _transformUnk( NULL )
3971 , _transform( NULL )
3972 , _mediaType( NULL )
3973 , _inputMediaType( NULL )
3974 , _outputMediaType( NULL )
3976 #ifdef __IWMResamplerProps_FWD_DEFINED__
3977 , _resamplerProps( NULL )
3980 // 1. Initialization
3982 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3984 // 2. Create Resampler Transform Object
3986 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3987 IID_IUnknown, ( void** ) &_transformUnk );
3989 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3991 #ifdef __IWMResamplerProps_FWD_DEFINED__
3992 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3993 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3996 // 3. Specify input / output format
// Build one media type describing the shared format, then clone it for the
// input and output sides; only the output's sample rate fields differ.
3998 MFCreateMediaType( &_mediaType );
3999 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4000 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4001 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4002 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4003 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4004 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4005 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4006 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4008 MFCreateMediaType( &_inputMediaType );
4009 _mediaType->CopyAllItems( _inputMediaType );
4011 _transform->SetInputType( 0, _inputMediaType, 0 );
4013 MFCreateMediaType( &_outputMediaType );
4014 _mediaType->CopyAllItems( _outputMediaType );
4016 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4017 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4019 _transform->SetOutputType( 0, _outputMediaType, 0 );
4021 // 4. Send stream start messages to Resampler
4023 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4024 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4025 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor: stop the transform, release all COM references.
// NOTE(review): a matching MFShutdown() call may sit on a line elided from
// this view — confirm against the full source.
4030 // 8. Send stream stop messages to Resampler
4032 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4033 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4039 SAFE_RELEASE( _transformUnk );
4040 SAFE_RELEASE( _transform );
4041 SAFE_RELEASE( _mediaType );
4042 SAFE_RELEASE( _inputMediaType );
4043 SAFE_RELEASE( _outputMediaType );
4045 #ifdef __IWMResamplerProps_FWD_DEFINED__
4046 SAFE_RELEASE( _resamplerProps );
// Convert inSampleCount frames from inBuffer into outBuffer.
// outSampleCount receives the number of frames actually produced, which may
// differ from ceil(inSampleCount * ratio) on any given call.
4050 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4052 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4053 if ( _sampleRatio == 1 )
4055 // no sample rate conversion required
4056 memcpy( outBuffer, inBuffer, inputBufferSize );
4057 outSampleCount = inSampleCount;
// Worst-case output size: scaled input plus one extra frame of slack.
4061 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4063 IMFMediaBuffer* rInBuffer;
4064 IMFSample* rInSample;
4065 BYTE* rInByteBuffer = NULL;
4067 // 5. Create Sample object from input data
4069 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4071 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4072 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4073 rInBuffer->Unlock();
4074 rInByteBuffer = NULL;
4076 rInBuffer->SetCurrentLength( inputBufferSize );
4078 MFCreateSample( &rInSample );
4079 rInSample->AddBuffer( rInBuffer );
4081 // 6. Pass input data to Resampler
4083 _transform->ProcessInput( 0, rInSample, 0 );
// The sample holds its own reference to the media buffer; ours can go.
4085 SAFE_RELEASE( rInBuffer );
4086 SAFE_RELEASE( rInSample );
4088 // 7. Perform sample rate conversion
4090 IMFMediaBuffer* rOutBuffer = NULL;
4091 BYTE* rOutByteBuffer = NULL;
4093 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4095 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4097 // 7.1 Create Sample object for output data
4099 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4100 MFCreateSample( &( rOutDataBuffer.pSample ) );
4101 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4102 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4103 rOutDataBuffer.dwStreamID = 0;
4104 rOutDataBuffer.dwStatus = 0;
4105 rOutDataBuffer.pEvents = NULL;
4107 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT means the MFT buffered the input and has
// nothing to emit yet; clean up and return (outSampleCount handling for this
// path sits on lines elided from this view).
4109 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4112 SAFE_RELEASE( rOutBuffer );
4113 SAFE_RELEASE( rOutDataBuffer.pSample );
4117 // 7.3 Write output data to outBuffer
// Re-acquire the (possibly re-allocated) buffer as one contiguous block and
// query how many bytes the resampler actually produced.
4119 SAFE_RELEASE( rOutBuffer );
4120 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4121 rOutBuffer->GetCurrentLength( &rBytes );
4123 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4124 memcpy( outBuffer, rOutByteBuffer, rBytes );
4125 rOutBuffer->Unlock();
4126 rOutByteBuffer = NULL;
// Convert produced bytes back to a frame count for the caller.
4128 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4129 SAFE_RELEASE( rOutBuffer );
4130 SAFE_RELEASE( rOutDataBuffer.pSample );
4134 unsigned int _bytesPerSample;
4135 unsigned int _channelCount;
4138 IUnknown* _transformUnk;
4139 IMFTransform* _transform;
4140 IMFMediaType* _mediaType;
4141 IMFMediaType* _inputMediaType;
4142 IMFMediaType* _outputMediaType;
4144 #ifdef __IWMResamplerProps_FWD_DEFINED__
4145 IWMResamplerProps* _resamplerProps;
4149 //-----------------------------------------------------------------------------
4151 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI state: one IAudioClient per direction, the derived
// capture/render service clients, and the event handles used for
// event-driven buffer notification.  All members start NULL.
4154 IAudioClient* captureAudioClient;
4155 IAudioClient* renderAudioClient;
4156 IAudioCaptureClient* captureClient;
4157 IAudioRenderClient* renderClient;
4158 HANDLE captureEvent;
4162 : captureAudioClient( NULL ),
4163 renderAudioClient( NULL ),
4164 captureClient( NULL ),
4165 renderClient( NULL ),
4166 captureEvent( NULL ),
4167 renderEvent( NULL ) {}
4170 //=============================================================================
// Initialize COM for this thread (remembering whether we did, so the
// destructor only calls CoUninitialize() to balance our own CoInitialize)
// and create the MMDevice enumerator used by all device queries.
4172 RtApiWasapi::RtApiWasapi()
4173 : coInitialized_( false, deviceEnumerator_( NULL )
4175 // WASAPI can run either apartment or multi-threaded
4176 HRESULT hr = CoInitialize( NULL );
4177 if ( !FAILED( hr ) )
4178 coInitialized_ = true;
4180 // Instantiate device enumerator
4181 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4182 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4183 ( void** ) &deviceEnumerator_ );
4185 // If this runs on an old Windows, it will fail. Ignore and proceed.
// Later entry points must therefore null-check deviceEnumerator_.
4187 deviceEnumerator_ = NULL;
4190 //-----------------------------------------------------------------------------
// Close any open stream, release the device enumerator, and balance the
// constructor's CoInitialize() if it succeeded.
4192 RtApiWasapi::~RtApiWasapi()
4194 if ( stream_.state != STREAM_CLOSED )
4197 SAFE_RELEASE( deviceEnumerator_ );
4199 // If this object previously called CoInitialize()
4200 if ( coInitialized_ )
4204 //=============================================================================
// Return the total number of active WASAPI endpoints: render device count
// plus capture device count.  Returns early if the enumerator was never
// created (old Windows); reports DRIVER_ERROR on enumeration failure.
4206 unsigned int RtApiWasapi::getDeviceCount( void )
4208 unsigned int captureDeviceCount = 0;
4209 unsigned int renderDeviceCount = 0;
4211 IMMDeviceCollection* captureDevices = NULL;
4212 IMMDeviceCollection* renderDevices = NULL;
4214 if ( !deviceEnumerator_ )
4217 // Count capture devices
4219 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4220 if ( FAILED( hr ) ) {
4221 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4225 hr = captureDevices->GetCount( &captureDeviceCount );
4226 if ( FAILED( hr ) ) {
4227 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4231 // Count render devices
4232 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4233 if ( FAILED( hr ) ) {
4234 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4238 hr = renderDevices->GetCount( &renderDeviceCount );
4239 if ( FAILED( hr ) ) {
4240 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4245 // release all references
4246 SAFE_RELEASE( captureDevices );
4247 SAFE_RELEASE( renderDevices );
4249 if ( errorText_.empty() )
4250 return captureDeviceCount + renderDeviceCount;
4252 error( RtAudioError::DRIVER_ERROR );
4256 //-----------------------------------------------------------------------------
// Probe a single device and fill in an RtAudio::DeviceInfo.
// Device indexing convention: indices [0, renderDeviceCount) are render
// devices; indices [renderDeviceCount, renderDeviceCount + captureDeviceCount)
// are capture devices.  Error paths set errorText_ and (on lines elided from
// this view) jump to the shared cleanup section at the bottom.
4258 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4260 RtAudio::DeviceInfo info;
4261 unsigned int captureDeviceCount = 0;
4262 unsigned int renderDeviceCount = 0;
4263 std::string defaultDeviceName;
4264 bool isCaptureDevice = false;
4266 PROPVARIANT deviceNameProp;
4267 PROPVARIANT defaultDeviceNameProp;
4269 IMMDeviceCollection* captureDevices = NULL;
4270 IMMDeviceCollection* renderDevices = NULL;
4271 IMMDevice* devicePtr = NULL;
4272 IMMDevice* defaultDevicePtr = NULL;
4273 IAudioClient* audioClient = NULL;
4274 IPropertyStore* devicePropStore = NULL;
4275 IPropertyStore* defaultDevicePropStore = NULL;
4277 WAVEFORMATEX* deviceFormat = NULL;
4278 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistically mark the probe as failed; set true only on full success.
4281 info.probed = false;
4283 // Count capture devices
4285 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4286 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4287 if ( FAILED( hr ) ) {
4288 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4292 hr = captureDevices->GetCount( &captureDeviceCount );
4293 if ( FAILED( hr ) ) {
4294 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4298 // Count render devices
4299 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4300 if ( FAILED( hr ) ) {
4301 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4305 hr = renderDevices->GetCount( &renderDeviceCount );
4306 if ( FAILED( hr ) ) {
4307 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4311 // validate device index
4312 if ( device >= captureDeviceCount + renderDeviceCount ) {
4313 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4314 errorType = RtAudioError::INVALID_USE;
4318 // determine whether index falls within capture or render devices
4319 if ( device >= renderDeviceCount ) {
4320 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4321 if ( FAILED( hr ) ) {
4322 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4325 isCaptureDevice = true;
4328 hr = renderDevices->Item( device, &devicePtr );
4329 if ( FAILED( hr ) ) {
4330 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4333 isCaptureDevice = false;
4336 // get default device name
// Fetch the default endpoint of the same direction so we can later compare
// names to set isDefaultInput / isDefaultOutput.
4337 if ( isCaptureDevice ) {
4338 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4339 if ( FAILED( hr ) ) {
4340 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4345 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4346 if ( FAILED( hr ) ) {
4347 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4352 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4353 if ( FAILED( hr ) ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4357 PropVariantInit( &defaultDeviceNameProp );
4359 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4360 if ( FAILED( hr ) ) {
4361 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4365 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// Read this device's friendly name the same way.
4368 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4369 if ( FAILED( hr ) ) {
4370 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4374 PropVariantInit( &deviceNameProp );
4376 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4377 if ( FAILED( hr ) ) {
4378 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4382 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device flags are decided by friendly-name comparison.
4385 if ( isCaptureDevice ) {
4386 info.isDefaultInput = info.name == defaultDeviceName;
4387 info.isDefaultOutput = false;
4390 info.isDefaultInput = false;
4391 info.isDefaultOutput = info.name == defaultDeviceName;
// Activate an audio client solely to query the shared-mode mix format.
4395 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4396 if ( FAILED( hr ) ) {
4397 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4401 hr = audioClient->GetMixFormat( &deviceFormat );
4402 if ( FAILED( hr ) ) {
4403 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// A WASAPI endpoint is one-directional: only the probed direction reports
// channels; duplex is achieved by pairing two endpoints.
4407 if ( isCaptureDevice ) {
4408 info.inputChannels = deviceFormat->nChannels;
4409 info.outputChannels = 0;
4410 info.duplexChannels = 0;
4413 info.inputChannels = 0;
4414 info.outputChannels = deviceFormat->nChannels;
4415 info.duplexChannels = 0;
4419 info.sampleRates.clear();
4421 // allow support for all sample rates as we have a built-in sample rate converter
4422 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4423 info.sampleRates.push_back( SAMPLE_RATES[i] );
4425 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Map the mix format (plain or WAVEFORMATEXTENSIBLE) onto RtAudio's
// native-format bitmask.
4428 info.nativeFormats = 0;
4430 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4431 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4432 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4434 if ( deviceFormat->wBitsPerSample == 32 ) {
4435 info.nativeFormats |= RTAUDIO_FLOAT32;
4437 else if ( deviceFormat->wBitsPerSample == 64 ) {
4438 info.nativeFormats |= RTAUDIO_FLOAT64;
4441 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4442 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4443 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4445 if ( deviceFormat->wBitsPerSample == 8 ) {
4446 info.nativeFormats |= RTAUDIO_SINT8;
4448 else if ( deviceFormat->wBitsPerSample == 16 ) {
4449 info.nativeFormats |= RTAUDIO_SINT16;
4451 else if ( deviceFormat->wBitsPerSample == 24 ) {
4452 info.nativeFormats |= RTAUDIO_SINT24;
4454 else if ( deviceFormat->wBitsPerSample == 32 ) {
4455 info.nativeFormats |= RTAUDIO_SINT32;
// Shared cleanup: every exit path (success or error) releases all COM
// references and CoTaskMem allocations acquired above.
4463 // release all references
4464 PropVariantClear( &deviceNameProp );
4465 PropVariantClear( &defaultDeviceNameProp );
4467 SAFE_RELEASE( captureDevices );
4468 SAFE_RELEASE( renderDevices );
4469 SAFE_RELEASE( devicePtr );
4470 SAFE_RELEASE( defaultDevicePtr );
4471 SAFE_RELEASE( audioClient );
4472 SAFE_RELEASE( devicePropStore );
4473 SAFE_RELEASE( defaultDevicePropStore );
4475 CoTaskMemFree( deviceFormat );
4476 CoTaskMemFree( closestMatchFormat );
4478 if ( !errorText_.empty() )
4483 //-----------------------------------------------------------------------------
// Linear scan for the device whose probe reports isDefaultOutput.
// O(n) probes per call — each iteration performs a full getDeviceInfo().
4485 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4487 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4488 if ( getDeviceInfo( i ).isDefaultOutput ) {
4496 //-----------------------------------------------------------------------------
// Linear scan for the device whose probe reports isDefaultInput.
// O(n) probes per call — each iteration performs a full getDeviceInfo().
4498 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4500 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4501 if ( getDeviceInfo( i ).isDefaultInput ) {
4509 //-----------------------------------------------------------------------------
// Stop the stream if running, then tear down all per-stream WASAPI state:
// COM clients, event handles, the WasapiHandle itself, and the user/device
// buffers.  Ends with stream state set to STREAM_CLOSED.
4511 void RtApiWasapi::closeStream( void )
4513 if ( stream_.state == STREAM_CLOSED ) {
4514 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4515 error( RtAudioError::WARNING );
4519 if ( stream_.state != STREAM_STOPPED )
4522 // clean up stream memory
4523 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4524 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4526 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4527 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are plain Win32 handles, not COM: CloseHandle, not Release.
4529 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4530 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4532 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4533 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4535 delete ( WasapiHandle* ) stream_.apiHandle;
4536 stream_.apiHandle = NULL;
// Free both user buffers (index 0 = output, 1 = input) and the shared
// device buffer.
4538 for ( int i = 0; i < 2; i++ ) {
4539 if ( stream_.userBuffer[i] ) {
4540 free( stream_.userBuffer[i] );
4541 stream_.userBuffer[i] = 0;
4545 if ( stream_.deviceBuffer ) {
4546 free( stream_.deviceBuffer );
4547 stream_.deviceBuffer = 0;
4550 // update stream state
4551 stream_.state = STREAM_CLOSED;
4554 //-----------------------------------------------------------------------------
// Mark the stream running and spawn the WASAPI processing thread
// (runWasapiThread), created suspended so its priority can be set before it
// begins executing.
4556 void RtApiWasapi::startStream( void )
4560 if ( stream_.state == STREAM_RUNNING ) {
4561 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4562 error( RtAudioError::WARNING );
4566 #if defined( HAVE_GETTIMEOFDAY )
4567 gettimeofday( &stream_.lastTickTimestamp, NULL );
// State must be RUNNING before the thread starts: wasapiThread's loop keys
// off this flag.
4570 // update stream state
4571 stream_.state = STREAM_RUNNING;
4573 // create WASAPI stream thread
4574 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4576 if ( !stream_.callbackInfo.thread ) {
4577 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4578 error( RtAudioError::THREAD_ERROR );
4581 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4582 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4586 //-----------------------------------------------------------------------------
// Request a graceful stop: signal the stream thread via the STREAM_STOPPING
// state, busy-wait until it reports STREAM_STOPPED, allow the final buffer
// to play out, then close the thread handle.
4588 void RtApiWasapi::stopStream( void )
4592 if ( stream_.state == STREAM_STOPPED ) {
4593 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4594 error( RtAudioError::WARNING );
4598 // inform stream thread by setting stream state to STREAM_STOPPING
4599 stream_.state = STREAM_STOPPING;
4601 // wait until stream thread is stopped
// Spin-wait on the state flag; the loop body (elided in this view)
// presumably yields — confirm against full source.
4602 while( stream_.state != STREAM_STOPPED ) {
4606 // Wait for the last buffer to play before stopping.
// Sleep for one buffer's duration in milliseconds.
4607 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4609 // close thread handle
4610 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4611 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4612 error( RtAudioError::THREAD_ERROR );
4616 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4619 //-----------------------------------------------------------------------------
// Same as stopStream() but without waiting for the last buffer to play:
// signal STREAM_STOPPING, wait for the thread to report STREAM_STOPPED,
// then close the thread handle immediately.
4621 void RtApiWasapi::abortStream( void )
4625 if ( stream_.state == STREAM_STOPPED ) {
4626 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4627 error( RtAudioError::WARNING );
4631 // inform stream thread by setting stream state to STREAM_STOPPING
4632 stream_.state = STREAM_STOPPING;
4634 // wait until stream thread is stopped
4635 while ( stream_.state != STREAM_STOPPED ) {
4639 // close thread handle
4640 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4641 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4642 error( RtAudioError::THREAD_ERROR );
4646 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4649 //-----------------------------------------------------------------------------
// Open one direction (mode) of a WASAPI stream on `device`.
// Creates/activates the needed IAudioClient(s) in the shared WasapiHandle
// and fills in the stream_ structure; actual IAudioClient::Initialize is
// deferred to the stream thread.  Device indexing matches getDeviceInfo():
// render devices first, then capture devices.  Returns SUCCESS/FAILURE;
// error paths set errorText_/errorType and fall through to shared cleanup.
4651 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4652 unsigned int firstChannel, unsigned int sampleRate,
4653 RtAudioFormat format, unsigned int* bufferSize,
4654 RtAudio::StreamOptions* options )
4656 bool methodResult = FAILURE;
4657 unsigned int captureDeviceCount = 0;
4658 unsigned int renderDeviceCount = 0;
4660 IMMDeviceCollection* captureDevices = NULL;
4661 IMMDeviceCollection* renderDevices = NULL;
4662 IMMDevice* devicePtr = NULL;
4663 WAVEFORMATEX* deviceFormat = NULL;
4664 unsigned int bufferBytes;
4665 stream_.state = STREAM_STOPPED;
4667 // create API Handle if not already created
// A single WasapiHandle is shared by both directions of a duplex stream.
4668 if ( !stream_.apiHandle )
4669 stream_.apiHandle = ( void* ) new WasapiHandle();
4671 // Count capture devices
4673 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4674 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4675 if ( FAILED( hr ) ) {
4676 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4680 hr = captureDevices->GetCount( &captureDeviceCount );
4681 if ( FAILED( hr ) ) {
4682 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4686 // Count render devices
4687 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4688 if ( FAILED( hr ) ) {
4689 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4693 hr = renderDevices->GetCount( &renderDeviceCount );
4694 if ( FAILED( hr ) ) {
4695 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4699 // validate device index
4700 if ( device >= captureDeviceCount + renderDeviceCount ) {
4701 errorType = RtAudioError::INVALID_USE;
4702 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4706 // if device index falls within capture devices
// Case 1: a true capture endpoint — only valid as INPUT.
4707 if ( device >= renderDeviceCount ) {
4708 if ( mode != INPUT ) {
4709 errorType = RtAudioError::INVALID_USE;
4710 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4714 // retrieve captureAudioClient from devicePtr
4715 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4717 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4718 if ( FAILED( hr ) ) {
4719 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4723 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4724 NULL, ( void** ) &captureAudioClient );
4725 if ( FAILED( hr ) ) {
4726 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4730 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4731 if ( FAILED( hr ) ) {
4732 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4736 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4737 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4740 // if device index falls within render devices and is configured for loopback
// Case 2: a render endpoint opened for INPUT — WASAPI loopback capture.
// The render-side client must exist first, so recurse to open OUTPUT on the
// same device before activating the loopback capture client.
4741 if ( device < renderDeviceCount && mode == INPUT )
4743 // if renderAudioClient is not initialised, initialise it now
4744 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4745 if ( !renderAudioClient )
4747 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4750 // retrieve captureAudioClient from devicePtr
// Note: this is intentionally the *capture* client slot, activated on a
// *render* device — that is how loopback capture is wired.
4751 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4753 hr = renderDevices->Item( device, &devicePtr );
4754 if ( FAILED( hr ) ) {
4755 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4759 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4760 NULL, ( void** ) &captureAudioClient );
4761 if ( FAILED( hr ) ) {
4762 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4766 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4767 if ( FAILED( hr ) ) {
4768 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4772 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4773 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4776 // if device index falls within render devices and is configured for output
// Case 3: a render endpoint opened for OUTPUT (the normal playback path).
4777 if ( device < renderDeviceCount && mode == OUTPUT )
4779 // if renderAudioClient is already initialised, don't initialise it again
4780 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4781 if ( renderAudioClient )
4783 methodResult = SUCCESS;
4787 hr = renderDevices->Item( device, &devicePtr );
4788 if ( FAILED( hr ) ) {
4789 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4793 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4794 NULL, ( void** ) &renderAudioClient );
4795 if ( FAILED( hr ) ) {
4796 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4800 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4801 if ( FAILED( hr ) ) {
4802 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4806 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4807 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Opening the second direction of an already-open stream promotes it to
// DUPLEX.
4811 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4812 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4813 stream_.mode = DUPLEX;
4816 stream_.mode = mode;
// Fill in the common stream bookkeeping for this direction.
4819 stream_.device[mode] = device;
4820 stream_.doByteSwap[mode] = false;
4821 stream_.sampleRate = sampleRate;
4822 stream_.bufferSize = *bufferSize;
4823 stream_.nBuffers = 1;
4824 stream_.nUserChannels[mode] = channels;
4825 stream_.channelOffset[mode] = firstChannel;
4826 stream_.userFormat = format;
4827 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4829 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4830 stream_.userInterleaved = false;
4832 stream_.userInterleaved = true;
4833 stream_.deviceInterleaved[mode] = true;
4835 // Set flags for buffer conversion.
// Conversion is needed whenever user and device differ in format, channel
// count, or (for multi-channel) interleaving.
4836 stream_.doConvertBuffer[mode] = false;
4837 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4838 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4839 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4840 stream_.doConvertBuffer[mode] = true;
4841 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4842 stream_.nUserChannels[mode] > 1 )
4843 stream_.doConvertBuffer[mode] = true;
4845 if ( stream_.doConvertBuffer[mode] )
4846 setConvertInfo( mode, 0 );
4848 // Allocate necessary internal buffers
4849 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4851 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4852 if ( !stream_.userBuffer[mode] ) {
4853 errorType = RtAudioError::MEMORY_ERROR;
4854 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4858 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4859 stream_.callbackInfo.priority = 15;
4861 stream_.callbackInfo.priority = 0;
4863 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4864 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4866 methodResult = SUCCESS;
// Shared cleanup for all paths; the WasapiHandle itself is kept on success
// and torn down via closeStream() on failure.
4870 SAFE_RELEASE( captureDevices );
4871 SAFE_RELEASE( renderDevices );
4872 SAFE_RELEASE( devicePtr );
4873 CoTaskMemFree( deviceFormat );
4875 // if method failed, close the stream
4876 if ( methodResult == FAILURE )
4879 if ( !errorText_.empty() )
4881 return methodResult;
4884 //=============================================================================
// Static thread entry point handed to CreateThread(): forwards to the
// owning RtApiWasapi instance's wasapiThread() processing loop.
// 'wasapiPtr' is the RtApiWasapi* passed as the thread parameter.
4886 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4889 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Static thread entry point spawned from wasapiThread() when the user
// callback returns 1: requests a graceful stop via stopStream() on the
// owning RtApiWasapi instance (a separate thread avoids self-join).
4894 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4897 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Static thread entry point spawned from wasapiThread() when the user
// callback returns 2: aborts the stream immediately via abortStream() on
// the owning RtApiWasapi instance (a separate thread avoids self-join).
4902 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4905 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4910 //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Core WASAPI stream-processing loop, executed on its own thread (see
// runWasapiThread above).  Sets up the capture and/or render endpoints held
// in stream_.apiHandle, sizes the intermediate WasapiBuffer ring buffers and
// conversion scratch space, then loops until stream_.state == STREAM_STOPPING:
// pull device input, resample/convert it to the user format, run the user
// callback, convert/resample the output, and push it back to the device.
// On exit the stream state is set to STREAM_STOPPED and any accumulated
// errorText is published to errorText_.
// NOTE(review): this extract elides a number of structural lines (braces,
// error-exit jumps and an Exit label); comments below describe only the
// visible code.
//-----------------------------------------------------------------------------
4912 void RtApiWasapi::wasapiThread()
4914 // as this is a new thread, we must CoInitialize it
4915 CoInitialize( NULL );
4919 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4920 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4921 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4922 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4923 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4924 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4926 WAVEFORMATEX* captureFormat = NULL;
4927 WAVEFORMATEX* renderFormat = NULL;
4928 float captureSrRatio = 0.0f;
4929 float renderSrRatio = 0.0f;
4930 WasapiBuffer captureBuffer;
4931 WasapiBuffer renderBuffer;
4932 WasapiResampler* captureResampler = NULL;
4933 WasapiResampler* renderResampler = NULL;
4935 // declare local stream variables
4936 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4937 BYTE* streamBuffer = NULL;
4938 unsigned long captureFlags = 0;
4939 unsigned int bufferFrameCount = 0;
4940 unsigned int numFramesPadding = 0;
4941 unsigned int convBufferSize = 0;
4942 bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4943 bool callbackPushed = true;
4944 bool callbackPulled = false;
4945 bool callbackStopped = false;
4946 int callbackResult = 0;
4948 // convBuffer is used to store converted buffers between WASAPI and the user
4949 char* convBuffer = NULL;
4950 unsigned int convBuffSize = 0;
4951 unsigned int deviceBuffSize = 0;
4953 std::string errorText;
4954 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4956 // Attempt to assign "Pro Audio" characteristic to thread
4957 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
// NOTE(review): no visible NULL check on AvrtDll before GetProcAddress — the
// guard appears to be among the lines elided from this extract; confirm.
4959 DWORD taskIndex = 0;
4960 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4961 ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4962 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4963 FreeLibrary( AvrtDll );
4966 // start capture stream if applicable
4967 if ( captureAudioClient ) {
4968 hr = captureAudioClient->GetMixFormat( &captureFormat );
4969 if ( FAILED( hr ) ) {
4970 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4974 // init captureResampler
4975 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4976 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4977 captureFormat->nSamplesPerSec, stream_.sampleRate );
4979 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4981 if ( !captureClient ) {
4982 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4983 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4988 if ( FAILED( hr ) ) {
4989 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4993 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4994 ( void** ) &captureClient );
4995 if ( FAILED( hr ) ) {
4996 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5000 // don't configure captureEvent if in loopback mode
5001 if ( !loopbackEnabled )
5003 // configure captureEvent to trigger on every available capture buffer
5004 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5005 if ( !captureEvent ) {
5006 errorType = RtAudioError::SYSTEM_ERROR;
5007 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5011 hr = captureAudioClient->SetEventHandle( captureEvent );
5012 if ( FAILED( hr ) ) {
5013 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
5017 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5020 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5022 // reset the capture stream
5023 hr = captureAudioClient->Reset();
5024 if ( FAILED( hr ) ) {
5025 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5029 // start the capture stream
5030 hr = captureAudioClient->Start();
5031 if ( FAILED( hr ) ) {
5032 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5037 unsigned int inBufferSize = 0;
5038 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5039 if ( FAILED( hr ) ) {
5040 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5044 // scale outBufferSize according to stream->user sample rate ratio
5045 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5046 inBufferSize *= stream_.nDeviceChannels[INPUT];
5048 // set captureBuffer size
5049 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5052 // start render stream if applicable
5053 if ( renderAudioClient ) {
5054 hr = renderAudioClient->GetMixFormat( &renderFormat );
5055 if ( FAILED( hr ) ) {
5056 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5060 // init renderResampler
5061 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5062 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5063 stream_.sampleRate, renderFormat->nSamplesPerSec );
5065 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5067 if ( !renderClient ) {
5068 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5069 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5074 if ( FAILED( hr ) ) {
5075 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5079 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5080 ( void** ) &renderClient );
5081 if ( FAILED( hr ) ) {
5082 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5086 // configure renderEvent to trigger on every available render buffer
5087 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5088 if ( !renderEvent ) {
5089 errorType = RtAudioError::SYSTEM_ERROR;
5090 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5094 hr = renderAudioClient->SetEventHandle( renderEvent );
5095 if ( FAILED( hr ) ) {
5096 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5100 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5101 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5103 // reset the render stream
5104 hr = renderAudioClient->Reset();
5105 if ( FAILED( hr ) ) {
5106 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5110 // start the render stream
5111 hr = renderAudioClient->Start();
5112 if ( FAILED( hr ) ) {
5113 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5118 unsigned int outBufferSize = 0;
5119 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5120 if ( FAILED( hr ) ) {
5121 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5125 // scale inBufferSize according to user->stream sample rate ratio
5126 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5127 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5129 // set renderBuffer size
5130 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5133 // malloc buffer memory
5134 if ( stream_.mode == INPUT )
5136 using namespace std; // for ceilf
5137 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5138 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5140 else if ( stream_.mode == OUTPUT )
5142 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5143 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5145 else if ( stream_.mode == DUPLEX )
5147 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5148 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5149 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5150 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5153 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5154 convBuffer = ( char* ) calloc( convBuffSize, 1 );
5155 stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5156 if ( !convBuffer || !stream_.deviceBuffer ) {
5157 errorType = RtAudioError::MEMORY_ERROR;
5158 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5162 // stream process loop
5163 while ( stream_.state != STREAM_STOPPING ) {
5164 if ( !callbackPulled ) {
5167 // 1. Pull callback buffer from inputBuffer
5168 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5169 // Convert callback buffer to user format
5171 if ( captureAudioClient )
// NOTE(review): samplesToPull is declared int but initialized from an
// unsigned cast and multiplied with unsigned quantities below — benign for
// realistic buffer sizes, but worth tightening to unsigned int.
5173 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5174 if ( captureSrRatio != 1 )
5176 // account for remainders
5181 while ( convBufferSize < stream_.bufferSize )
5183 // Pull callback buffer from inputBuffer
5184 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5185 samplesToPull * stream_.nDeviceChannels[INPUT],
5186 stream_.deviceFormat[INPUT] );
5188 if ( !callbackPulled )
5193 // Convert callback buffer to user sample rate
5194 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5195 unsigned int convSamples = 0;
5197 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5202 convBufferSize += convSamples;
5203 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5206 if ( callbackPulled )
5208 if ( stream_.doConvertBuffer[INPUT] ) {
5209 // Convert callback buffer to user format
5210 convertBuffer( stream_.userBuffer[INPUT],
5211 stream_.deviceBuffer,
5212 stream_.convertInfo[INPUT] );
5215 // no further conversion, simple copy deviceBuffer to userBuffer
5216 memcpy( stream_.userBuffer[INPUT],
5217 stream_.deviceBuffer,
5218 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5223 // if there is no capture stream, set callbackPulled flag
5224 callbackPulled = true;
5229 // 1. Execute user callback method
5230 // 2. Handle return value from callback
5232 // if callback has not requested the stream to stop
5233 if ( callbackPulled && !callbackStopped ) {
5234 // Execute user callback method
5235 callbackResult = callback( stream_.userBuffer[OUTPUT],
5236 stream_.userBuffer[INPUT],
5239 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5240 stream_.callbackInfo.userData );
5242 // Handle return value from callback
5243 if ( callbackResult == 1 ) {
5244 // instantiate a thread to stop this thread
5245 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5246 if ( !threadHandle ) {
5247 errorType = RtAudioError::THREAD_ERROR;
5248 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5251 else if ( !CloseHandle( threadHandle ) ) {
5252 errorType = RtAudioError::THREAD_ERROR;
5253 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5257 callbackStopped = true;
5259 else if ( callbackResult == 2 ) {
5260 // instantiate a thread to stop this thread
5261 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5262 if ( !threadHandle ) {
5263 errorType = RtAudioError::THREAD_ERROR;
5264 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5267 else if ( !CloseHandle( threadHandle ) ) {
5268 errorType = RtAudioError::THREAD_ERROR;
5269 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5273 callbackStopped = true;
5280 // 1. Convert callback buffer to stream format
5281 // 2. Convert callback buffer to stream sample rate and channel count
5282 // 3. Push callback buffer into outputBuffer
5284 if ( renderAudioClient && callbackPulled )
5286 // if the last call to renderBuffer.PushBuffer() was successful
5287 if ( callbackPushed || convBufferSize == 0 )
5289 if ( stream_.doConvertBuffer[OUTPUT] )
5291 // Convert callback buffer to stream format
5292 convertBuffer( stream_.deviceBuffer,
5293 stream_.userBuffer[OUTPUT],
5294 stream_.convertInfo[OUTPUT] );
5298 // no further conversion, simple copy userBuffer to deviceBuffer
5299 memcpy( stream_.deviceBuffer,
5300 stream_.userBuffer[OUTPUT],
5301 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5304 // Convert callback buffer to stream sample rate
5305 renderResampler->Convert( convBuffer,
5306 stream_.deviceBuffer,
5311 // Push callback buffer into outputBuffer
5312 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5313 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5314 stream_.deviceFormat[OUTPUT] );
5317 // if there is no render stream, set callbackPushed flag
5318 callbackPushed = true;
5323 // 1. Get capture buffer from stream
5324 // 2. Push capture buffer into inputBuffer
5325 // 3. If 2. was successful: Release capture buffer
5327 if ( captureAudioClient ) {
5328 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5329 if ( !callbackPulled ) {
5330 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5333 // Get capture buffer from stream
5334 hr = captureClient->GetBuffer( &streamBuffer,
5336 &captureFlags, NULL, NULL );
5337 if ( FAILED( hr ) ) {
5338 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5342 if ( bufferFrameCount != 0 ) {
5343 // Push capture buffer into inputBuffer
5344 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5345 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5346 stream_.deviceFormat[INPUT] ) )
5348 // Release capture buffer
5349 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5350 if ( FAILED( hr ) ) {
5351 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5357 // Inform WASAPI that capture was unsuccessful
5358 hr = captureClient->ReleaseBuffer( 0 );
5359 if ( FAILED( hr ) ) {
5360 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5367 // Inform WASAPI that capture was unsuccessful
5368 hr = captureClient->ReleaseBuffer( 0 );
5369 if ( FAILED( hr ) ) {
5370 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5378 // 1. Get render buffer from stream
5379 // 2. Pull next buffer from outputBuffer
5380 // 3. If 2. was successful: Fill render buffer with next buffer
5381 // Release render buffer
5383 if ( renderAudioClient ) {
5384 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5385 if ( callbackPulled && !callbackPushed ) {
5386 WaitForSingleObject( renderEvent, INFINITE );
5389 // Get render buffer from stream
5390 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5391 if ( FAILED( hr ) ) {
5392 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5396 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5397 if ( FAILED( hr ) ) {
5398 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5402 bufferFrameCount -= numFramesPadding;
5404 if ( bufferFrameCount != 0 ) {
5405 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5406 if ( FAILED( hr ) ) {
5407 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5411 // Pull next buffer from outputBuffer
5412 // Fill render buffer with next buffer
5413 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5414 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5415 stream_.deviceFormat[OUTPUT] ) )
5417 // Release render buffer
5418 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5419 if ( FAILED( hr ) ) {
5420 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5426 // Inform WASAPI that render was unsuccessful
5427 hr = renderClient->ReleaseBuffer( 0, 0 );
5428 if ( FAILED( hr ) ) {
5429 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5436 // Inform WASAPI that render was unsuccessful
5437 hr = renderClient->ReleaseBuffer( 0, 0 );
5438 if ( FAILED( hr ) ) {
5439 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5445 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5446 if ( callbackPushed ) {
5447 // unsetting the callbackPulled flag lets the stream know that
5448 // the audio device is ready for another callback output buffer.
5449 callbackPulled = false;
5452 RtApi::tickStreamTime();
// Cleanup path: release the COM-allocated mix formats and the locally
// allocated conversion buffer and resamplers.
5459 CoTaskMemFree( captureFormat );
5460 CoTaskMemFree( renderFormat );
5462 free ( convBuffer );
5463 delete renderResampler;
5464 delete captureResampler;
5468 // update stream state
5469 stream_.state = STREAM_STOPPED;
5471 if ( !errorText.empty() )
5473 errorText_ = errorText;
5478 //******************** End of __WINDOWS_WASAPI__ *********************//
5482 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5484 // Modified by Robin Davies, October 2005
5485 // - Improvements to DirectX pointer chasing.
5486 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5487 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5488 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5489 // Changed device query structure for RtAudio 4.0.7, January 2010
5491 #include <windows.h>
5492 #include <process.h>
5493 #include <mmsystem.h>
5497 #include <algorithm>
5499 #if defined(__MINGW32__)
5500 // missing from latest mingw winapi
5501 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5502 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5503 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5504 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5507 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5509 #ifdef _MSC_VER // if Microsoft Visual C++
5510 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5513 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5515 if ( pointer > bufferSize ) pointer -= bufferSize;
5516 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5517 if ( pointer < earlierPointer ) pointer += bufferSize;
5518 return pointer >= earlierPointer && pointer < laterPointer;
5521 // A structure to hold various information related to the DirectSound
5522 // API implementation.
5524 unsigned int drainCounter; // Tracks callback counts when draining
5525 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Per-direction state arrays ([0]/[1] — presumably playback/capture; verify
// against the elided member declarations referenced by the constructor below).
5529 UINT bufferPointer[2];
5530 DWORD dsBufferSize[2];
5531 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the counters and clear all per-direction state.
// (The id, buffer and xrun member declarations are elided in this extract.)
5535 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5538 // Declarations for utility functions, callbacks, and structures
5539 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSoundEnumerate() /
// DirectSoundCaptureEnumerate() in getDeviceCount() below.
5540 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5541 LPCTSTR description,
// Maps a DirectSound error code to a printable description (see the
// errorStream_ usage in getDeviceCount/getDeviceInfo below).
5545 static const char* getErrorString( int code );
// Thread entry routine — presumably the DirectSound buffer-servicing thread;
// its spawn site is not visible in this extract.
5547 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice constructor initializer: device starts unfound with neither the
// output [0] nor input [1] GUID valid.  (The struct's member declarations
// are elided in this extract.)
5556 : found(false) { validId[0] = false; validId[1] = false; }
// Pairs the enumeration direction flag with the device list being filled;
// getDeviceCount() sets isInput and dsDevices before each enumeration pass.
5559 struct DsProbeData {
5561 std::vector<struct DsDevice>* dsDevices;
// Constructor: initialize COM for this thread.  coInitialized_ records
// whether our CoInitialize() succeeded so that the destructor only calls
// CoUninitialize() when this object actually owns the initialization.
5564 RtApiDs :: RtApiDs()
5566 // Dsound will run both-threaded. If CoInitialize fails, then just
5567 // accept whatever the mainline chose for a threading model.
5568 coInitialized_ = false;
5569 HRESULT hr = CoInitialize( NULL );
5570 if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: close any still-open stream, then balance the CoInitialize()
// performed in the constructor (only if it succeeded there).
5573 RtApiDs :: ~RtApiDs()
5575 if ( stream_.state != STREAM_CLOSED ) closeStream();
5576 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5579 // The DirectSound default output is always the first device.
5580 unsigned int RtApiDs :: getDefaultOutputDevice( void )
// (Body elided in this extract; per the comment above it returns index 0.)
5585 // The DirectSound default input is always the first input device,
5586 // which is the first capture device enumerated.
5587 unsigned int RtApiDs :: getDefaultInputDevice( void )
// (Body elided in this extract; per the comment above it returns index 0.)
// Re-enumerate all DirectSound playback and capture devices into dsDevices,
// prune entries that have disappeared since the last query, and return the
// resulting device count.  Enumeration failures are reported as non-fatal
// RtAudioError::WARNINGs rather than aborting the query.
5592 unsigned int RtApiDs :: getDeviceCount( void )
5594 // Set query flag for previously found devices to false, so that we
5595 // can check for any devices that have disappeared.
5596 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5597 dsDevices[i].found = false;
5599 // Query DirectSound devices.
5600 struct DsProbeData probeInfo;
5601 probeInfo.isInput = false;
5602 probeInfo.dsDevices = &dsDevices;
5603 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5604 if ( FAILED( result ) ) {
5605 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5606 errorText_ = errorStream_.str();
5607 error( RtAudioError::WARNING );
5610 // Query DirectSoundCapture devices.
5611 probeInfo.isInput = true;
5612 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5613 if ( FAILED( result ) ) {
5614 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5615 errorText_ = errorStream_.str();
5616 error( RtAudioError::WARNING );
5619 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5620 for ( unsigned int i=0; i<dsDevices.size(); ) {
5621 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
// (The else-branch advancing 'i' is elided in this extract.)
5625 return static_cast<unsigned int>(dsDevices.size());
// Probe one DirectSound device and fill an RtAudio::DeviceInfo describing
// its playback and capture capabilities: channel counts, supported sample
// rates, native data formats, default-device flags and name.  'device'
// indexes the dsDevices vector; an empty device list or out-of-range index
// raises RtAudioError::INVALID_USE, while open/caps failures are reported
// as WARNINGs.
// NOTE(review): several structural lines (closing braces, the 'probeInput:'
// label, and local declarations such as 'result', 'outCaps', 'inCaps') are
// elided in this extract.
5628 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5630 RtAudio::DeviceInfo info;
5631 info.probed = false;
5633 if ( dsDevices.size() == 0 ) {
5634 // Force a query of all devices
5636 if ( dsDevices.size() == 0 ) {
5637 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5638 error( RtAudioError::INVALID_USE );
5643 if ( device >= dsDevices.size() ) {
5644 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5645 error( RtAudioError::INVALID_USE );
// Skip the playback probe entirely for capture-only devices.
5650 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5652 LPDIRECTSOUND output;
5654 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5655 if ( FAILED( result ) ) {
5656 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5657 errorText_ = errorStream_.str();
5658 error( RtAudioError::WARNING );
5662 outCaps.dwSize = sizeof( outCaps );
5663 result = output->GetCaps( &outCaps );
5664 if ( FAILED( result ) ) {
5666 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5667 errorText_ = errorStream_.str();
5668 error( RtAudioError::WARNING );
5672 // Get output channel information.
5673 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5675 // Get sample rate information.
5676 info.sampleRates.clear();
5677 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5678 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5679 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5680 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate not exceeding 48 kHz.
5682 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5683 info.preferredSampleRate = SAMPLE_RATES[k];
5687 // Get format information.
5688 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5689 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5693 if ( getDefaultOutputDevice() == device )
5694 info.isDefaultOutput = true;
// Playback-only device: record the name and skip the capture probe.
5696 if ( dsDevices[ device ].validId[1] == false ) {
5697 info.name = dsDevices[ device ].name;
5704 LPDIRECTSOUNDCAPTURE input;
5705 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5706 if ( FAILED( result ) ) {
5707 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5708 errorText_ = errorStream_.str();
5709 error( RtAudioError::WARNING );
5714 inCaps.dwSize = sizeof( inCaps );
5715 result = input->GetCaps( &inCaps );
5716 if ( FAILED( result ) ) {
5718 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5719 errorText_ = errorStream_.str();
5720 error( RtAudioError::WARNING );
5724 // Get input channel information.
5725 info.inputChannels = inCaps.dwChannels;
5727 // Get sample rate and format information.
5728 std::vector<unsigned int> rates;
5729 if ( inCaps.dwChannels >= 2 ) {
5730 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5731 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5732 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5733 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5734 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5735 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5736 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5737 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5739 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5740 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5741 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5742 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5743 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5745 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5746 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5747 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5748 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5749 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5752 else if ( inCaps.dwChannels == 1 ) {
5753 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5754 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5755 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5756 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5757 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5758 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5759 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5760 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5762 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5763 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5764 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5765 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5766 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5768 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5769 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5770 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5771 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5772 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5775 else info.inputChannels = 0; // technically, this would be an error
5779 if ( info.inputChannels == 0 ) return info;
5781 // Copy the supported rates to the info structure but avoid duplication.
5783 for ( unsigned int i=0; i<rates.size(); i++ ) {
5785 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5786 if ( rates[i] == info.sampleRates[j] ) {
5791 if ( found == false ) info.sampleRates.push_back( rates[i] );
5793 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5795 // If device opens for both playback and capture, we determine the channels.
5796 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5797 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5799 if ( device == 0 ) info.isDefaultInput = true;
5801 // Copy name and return.
5802 info.name = dsDevices[ device ].name;
// Probe and open one direction (OUTPUT or INPUT) of a DirectSound stream.
// Called once per direction; a duplex stream results from two successive
// calls (OUTPUT first, then INPUT — see the stream_.mode == OUTPUT &&
// mode == INPUT transition to DUPLEX near the end).
//
// Parameters:
//   device       - index into the module-level dsDevices list.
//   mode         - OUTPUT or INPUT; selects which DS interface is created.
//   channels     - number of channels requested by the user.
//   firstChannel - channel offset; channels + firstChannel is the device
//                  channel count and may not exceed 2 (DirectSound limit).
//   sampleRate   - requested sample rate in Hz.
//   format       - user sample format; the device format is negotiated to
//                  16-bit PCM unless only 8-bit is available or requested.
//   bufferSize   - in/out: user buffer size in frames, clamped to >= 32.
//   options      - optional stream options (buffer count, latency flags).
//
// Returns true on success; on failure sets errorText_ and releases any
// DirectSound objects / memory acquired so far before returning.
// NOTE(review): this listing elides some lines (returns, closing braces);
// comments below describe only what is visible here.
5807 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5808 unsigned int firstChannel, unsigned int sampleRate,
5809 RtAudioFormat format, unsigned int *bufferSize,
5810 RtAudio::StreamOptions *options )
// DirectSound supports at most 2 channels per device.
5812 if ( channels + firstChannel > 2 ) {
5813 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
// Validate the device index against the enumerated device list.
5817 size_t nDevices = dsDevices.size();
5818 if ( nDevices == 0 ) {
5819 // This should not happen because a check is made before this function is called.
5820 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5824 if ( device >= nDevices ) {
5825 // This should not happen because a check is made before this function is called.
5826 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// Confirm the chosen device actually supports the requested direction
// (validId[0] = output GUID valid, validId[1] = input GUID valid).
5830 if ( mode == OUTPUT ) {
5831 if ( dsDevices[ device ].validId[0] == false ) {
5832 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5833 errorText_ = errorStream_.str();
5837 else { // mode == INPUT
5838 if ( dsDevices[ device ].validId[1] == false ) {
5839 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5840 errorText_ = errorStream_.str();
5845 // According to a note in PortAudio, using GetDesktopWindow()
5846 // instead of GetForegroundWindow() is supposed to avoid problems
5847 // that occur when the application's window is not the foreground
5848 // window. Also, if the application window closes before the
5849 // DirectSound buffer, DirectSound can crash. In the past, I had
5850 // problems when using GetDesktopWindow() but it seems fine now
5851 // (January 2010). I'll leave it commented here.
5852 // HWND hWnd = GetForegroundWindow();
5853 HWND hWnd = GetDesktopWindow();
5855 // Check the numberOfBuffers parameter and limit the lowest value to
5856 // two. This is a judgement call and a value of two is probably too
5857 // low for capture, but it should work for playback.
5859 if ( options ) nBuffers = options->numberOfBuffers;
5860 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5861 if ( nBuffers < 2 ) nBuffers = 3;
5863 // Check the lower range of the user-specified buffer size and set
5864 // (arbitrarily) to a lower bound of 32.
5865 if ( *bufferSize < 32 ) *bufferSize = 32;
5867 // Create the wave format structure. The data format setting will
5868 // be determined later.
5869 WAVEFORMATEX waveFormat;
5870 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5871 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5872 waveFormat.nChannels = channels + firstChannel;
5873 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5875 // Determine the device buffer size. By default, we'll use the value
5876 // defined above (32K), but we will grow it to make allowances for
5877 // very large software buffer sizes.
5878 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
// dsPointerLeadTime = how far (in bytes) our write position leads the
// device's safe-write cursor; computed per direction below.
5879 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle temporarily carry the DS object and DS buffer pointers
// until they are stored in the DsHandle at the end of the function.
5881 void *ohandle = 0, *bhandle = 0;
// ---------------- OUTPUT (playback) setup ----------------
5883 if ( mode == OUTPUT ) {
5885 LPDIRECTSOUND output;
5886 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5887 if ( FAILED( result ) ) {
5888 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5889 errorText_ = errorStream_.str();
5894 outCaps.dwSize = sizeof( outCaps );
5895 result = output->GetCaps( &outCaps );
5896 if ( FAILED( result ) ) {
5898 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5899 errorText_ = errorStream_.str();
5903 // Check channel information.
5904 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5905 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5906 errorText_ = errorStream_.str();
5910 // Check format information. Use 16-bit format unless not
5911 // supported or user requests 8-bit.
5912 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5913 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5914 waveFormat.wBitsPerSample = 16;
5915 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5918 waveFormat.wBitsPerSample = 8;
5919 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5921 stream_.userFormat = format;
5923 // Update wave format structure and buffer information.
5924 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5925 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5926 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5928 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5929 while ( dsPointerLeadTime * 2U > dsBufferSize )
5932 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5933 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5934 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5935 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5936 if ( FAILED( result ) ) {
5938 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5939 errorText_ = errorStream_.str();
5943 // Even though we will write to the secondary buffer, we need to
5944 // access the primary buffer to set the correct output format
5945 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5946 // buffer description.
5947 DSBUFFERDESC bufferDescription;
5948 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5949 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5950 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5952 // Obtain the primary buffer
5953 LPDIRECTSOUNDBUFFER buffer;
5954 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5955 if ( FAILED( result ) ) {
5957 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5958 errorText_ = errorStream_.str();
5962 // Set the primary DS buffer sound format.
5963 result = buffer->SetFormat( &waveFormat );
5964 if ( FAILED( result ) ) {
5966 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5967 errorText_ = errorStream_.str();
5971 // Setup the secondary DS buffer description.
5972 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5973 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5974 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5975 DSBCAPS_GLOBALFOCUS |
5976 DSBCAPS_GETCURRENTPOSITION2 |
5977 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5978 bufferDescription.dwBufferBytes = dsBufferSize;
5979 bufferDescription.lpwfxFormat = &waveFormat;
5981 // Try to create the secondary DS buffer. If that doesn't work,
5982 // try to use software mixing. Otherwise, there's a problem.
5983 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5984 if ( FAILED( result ) ) {
5985 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5986 DSBCAPS_GLOBALFOCUS |
5987 DSBCAPS_GETCURRENTPOSITION2 |
5988 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5989 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5990 if ( FAILED( result ) ) {
5992 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5993 errorText_ = errorStream_.str();
5998 // Get the buffer size ... might be different from what we specified.
6000 dsbcaps.dwSize = sizeof( DSBCAPS );
6001 result = buffer->GetCaps( &dsbcaps );
6002 if ( FAILED( result ) ) {
6005 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6006 errorText_ = errorStream_.str();
6010 dsBufferSize = dsbcaps.dwBufferBytes;
6012 // Lock the DS buffer
6015 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6016 if ( FAILED( result ) ) {
6019 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6020 errorText_ = errorStream_.str();
6024 // Zero the DS buffer
6025 ZeroMemory( audioPtr, dataLen );
6027 // Unlock the DS buffer
6028 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6029 if ( FAILED( result ) ) {
6032 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6033 errorText_ = errorStream_.str();
// Stash the DS object and secondary buffer for storage in the DsHandle.
6037 ohandle = (void *) output;
6038 bhandle = (void *) buffer;
// ---------------- INPUT (capture) setup ----------------
6041 if ( mode == INPUT ) {
6043 LPDIRECTSOUNDCAPTURE input;
6044 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6045 if ( FAILED( result ) ) {
6046 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6047 errorText_ = errorStream_.str();
6052 inCaps.dwSize = sizeof( inCaps );
6053 result = input->GetCaps( &inCaps );
6054 if ( FAILED( result ) ) {
6056 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6057 errorText_ = errorStream_.str();
6061 // Check channel information.
6062 if ( inCaps.dwChannels < channels + firstChannel ) {
6063 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6067 // Check format information. Use 16-bit format unless user
// requests 8-bit AND the device advertises an 8-bit WAVE format
// for the requested channel count (stereo vs. mono masks below).
6069 DWORD deviceFormats;
6070 if ( channels + firstChannel == 2 ) {
6071 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6072 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6073 waveFormat.wBitsPerSample = 8;
6074 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6076 else { // assume 16-bit is supported
6077 waveFormat.wBitsPerSample = 16;
6078 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6081 else { // channel == 1
6082 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6083 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6084 waveFormat.wBitsPerSample = 8;
6085 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6087 else { // assume 16-bit is supported
6088 waveFormat.wBitsPerSample = 16;
6089 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6092 stream_.userFormat = format;
6094 // Update wave format structure and buffer information.
6095 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6096 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6097 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6099 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6100 while ( dsPointerLeadTime * 2U > dsBufferSize )
6103 // Setup the secondary DS buffer description.
6104 DSCBUFFERDESC bufferDescription;
6105 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6106 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6107 bufferDescription.dwFlags = 0;
6108 bufferDescription.dwReserved = 0;
6109 bufferDescription.dwBufferBytes = dsBufferSize;
6110 bufferDescription.lpwfxFormat = &waveFormat;
6112 // Create the capture buffer.
6113 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6114 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6115 if ( FAILED( result ) ) {
6117 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6118 errorText_ = errorStream_.str();
6122 // Get the buffer size ... might be different from what we specified.
6124 dscbcaps.dwSize = sizeof( DSCBCAPS );
6125 result = buffer->GetCaps( &dscbcaps );
6126 if ( FAILED( result ) ) {
6129 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6130 errorText_ = errorStream_.str();
6134 dsBufferSize = dscbcaps.dwBufferBytes;
6136 // NOTE: We could have a problem here if this is a duplex stream
6137 // and the play and capture hardware buffer sizes are different
6138 // (I'm actually not sure if that is a problem or not).
6139 // Currently, we are not verifying that.
6141 // Lock the capture buffer
6144 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6145 if ( FAILED( result ) ) {
6148 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6149 errorText_ = errorStream_.str();
// Zero the capture buffer so no stale data is delivered.
6154 ZeroMemory( audioPtr, dataLen );
6156 // Unlock the buffer
6157 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6158 if ( FAILED( result ) ) {
6161 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6162 errorText_ = errorStream_.str();
6166 ohandle = (void *) input;
6167 bhandle = (void *) buffer;
6170 // Set various stream parameters
6171 DsHandle *handle = 0;
6172 stream_.nDeviceChannels[mode] = channels + firstChannel;
6173 stream_.nUserChannels[mode] = channels;
6174 stream_.bufferSize = *bufferSize;
6175 stream_.channelOffset[mode] = firstChannel;
// DirectSound buffers are always interleaved; deinterleaving for the
// user (if requested) is handled via the conversion path below.
6176 stream_.deviceInterleaved[mode] = true;
6177 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6178 else stream_.userInterleaved = true;
6180 // Set flag for buffer conversion
// (needed when channel count, sample format, or interleaving differ
// between the user side and the device side).
6181 stream_.doConvertBuffer[mode] = false;
6182 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6183 stream_.doConvertBuffer[mode] = true;
6184 if (stream_.userFormat != stream_.deviceFormat[mode])
6185 stream_.doConvertBuffer[mode] = true;
6186 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6187 stream_.nUserChannels[mode] > 1 )
6188 stream_.doConvertBuffer[mode] = true;
6190 // Allocate necessary internal buffers
6191 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6192 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6193 if ( stream_.userBuffer[mode] == NULL ) {
6194 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6198 if ( stream_.doConvertBuffer[mode] ) {
// A device-format staging buffer is needed for conversion. For a
// duplex stream we reuse the existing (output) device buffer when it
// is already at least as large as what the input side needs.
6200 bool makeBuffer = true;
6201 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6202 if ( mode == INPUT ) {
6203 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6204 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6205 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6210 bufferBytes *= *bufferSize;
6211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6213 if ( stream_.deviceBuffer == NULL ) {
6214 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6220 // Allocate our DsHandle structures for the stream.
// Only the first call (per stream) allocates; the second (duplex)
// call reuses the handle already stored in stream_.apiHandle.
6221 if ( stream_.apiHandle == 0 ) {
6223 handle = new DsHandle;
6225 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this allocates a
// DsHandle — looks like a copy/paste slip carried in the message
// text; left untouched here (doc-only pass).
6226 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6230 // Create a manual-reset event.
// Signaled by the callback thread when output draining completes;
// stopStream() blocks on it.
6231 handle->condition = CreateEvent( NULL, // no security
6232 TRUE, // manual-reset
6233 FALSE, // non-signaled initially
6235 stream_.apiHandle = (void *) handle;
6238 handle = (DsHandle *) stream_.apiHandle;
6239 handle->id[mode] = ohandle;
6240 handle->buffer[mode] = bhandle;
6241 handle->dsBufferSize[mode] = dsBufferSize;
6242 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6244 stream_.device[mode] = device;
6245 stream_.state = STREAM_STOPPED;
6246 if ( stream_.mode == OUTPUT && mode == INPUT )
6247 // We had already set up an output stream.
6248 stream_.mode = DUPLEX;
6250 stream_.mode = mode;
6251 stream_.nBuffers = nBuffers;
6252 stream_.sampleRate = sampleRate;
6254 // Setup the buffer conversion information structure.
6255 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6257 // Setup the callback thread.
// Started once per stream; a duplex open's second call sees
// isRunning == true and skips this.
6258 if ( stream_.callbackInfo.isRunning == false ) {
6260 stream_.callbackInfo.isRunning = true;
6261 stream_.callbackInfo.object = (void *) this;
6262 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6263 &stream_.callbackInfo, 0, &threadId );
6264 if ( stream_.callbackInfo.thread == 0 ) {
6265 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6269 // Boost DS thread priority
6270 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// ---------------- error cleanup path ----------------
// Release any DirectSound objects/buffers acquired, close the event,
// and free all allocated stream memory before returning failure.
6276 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6277 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6278 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6279 if ( buffer ) buffer->Release();
6282 if ( handle->buffer[1] ) {
6283 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6284 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6285 if ( buffer ) buffer->Release();
6288 CloseHandle( handle->condition );
6290 stream_.apiHandle = 0;
6293 for ( int i=0; i<2; i++ ) {
6294 if ( stream_.userBuffer[i] ) {
6295 free( stream_.userBuffer[i] );
6296 stream_.userBuffer[i] = 0;
6300 if ( stream_.deviceBuffer ) {
6301 free( stream_.deviceBuffer );
6302 stream_.deviceBuffer = 0;
6305 stream_.state = STREAM_CLOSED;
// Close the currently open stream: stop the callback thread, release the
// DirectSound playback/capture objects, close the synchronization event,
// free all internal buffers, and reset the stream bookkeeping to
// UNINITIALIZED / STREAM_CLOSED. Issues only a WARNING (not an error) if
// no stream is open.
6309 void RtApiDs :: closeStream()
6311 if ( stream_.state == STREAM_CLOSED ) {
6312 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6313 error( RtAudioError::WARNING );
6317 // Stop the callback thread.
// Clearing isRunning asks the thread to exit; we then join it and
// close its handle.
6318 stream_.callbackInfo.isRunning = false;
6319 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6320 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6322 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release playback-side COM objects (buffer[0]/id[0]).
6324 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6325 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6326 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release capture-side COM objects (buffer[1]/id[1]).
6333 if ( handle->buffer[1] ) {
6334 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6335 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Dispose of the drain-condition event and the DsHandle itself.
6342 CloseHandle( handle->condition );
6344 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6347 for ( int i=0; i<2; i++ ) {
6348 if ( stream_.userBuffer[i] ) {
6349 free( stream_.userBuffer[i] );
6350 stream_.userBuffer[i] = 0;
6354 if ( stream_.deviceBuffer ) {
6355 free( stream_.deviceBuffer );
6356 stream_.deviceBuffer = 0;
6359 stream_.mode = UNINITIALIZED;
6360 stream_.state = STREAM_CLOSED;
// Start the open stream: begin looped playback on the output buffer and/or
// looped capture on the input buffer, reset the drain bookkeeping, and mark
// the stream STREAM_RUNNING. Issues a WARNING if already running; raises a
// SYSTEM_ERROR if any DirectSound call fails.
6363 void RtApiDs :: startStream()
6366 if ( stream_.state == STREAM_RUNNING ) {
6367 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6368 error( RtAudioError::WARNING );
// Record the stream start time for getStreamTime() bookkeeping.
6372 #if defined( HAVE_GETTIMEOFDAY )
6373 gettimeofday( &stream_.lastTickTimestamp, NULL );
6376 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6378 // Increase scheduler frequency on lesser windows (a side-effect of
6379 // increasing timer accuracy). On greater windows (Win2K or later),
6380 // this is already in effect.
6381 timeBeginPeriod( 1 );
// buffersRolling is re-armed so callbackEvent() re-synchronizes the
// read/write pointers on the first callback after (re)start.
6383 buffersRolling = false;
6384 duplexPrerollBytes = 0;
6386 if ( stream_.mode == DUPLEX ) {
6387 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6388 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looped playback on the output buffer (output or duplex mode).
6392 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6394 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6395 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6396 if ( FAILED( result ) ) {
6397 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6398 errorText_ = errorStream_.str();
// Start looped capture on the input buffer (input or duplex mode).
6403 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6405 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6406 result = buffer->Start( DSCBSTART_LOOPING );
6407 if ( FAILED( result ) ) {
6408 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6409 errorText_ = errorStream_.str();
// Reset drain state and the manual-reset event used by stopStream().
6414 handle->drainCounter = 0;
6415 handle->internalDrain = false;
6416 ResetEvent( handle->condition );
6417 stream_.state = STREAM_RUNNING;
6420 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop the running stream. For output/duplex, first lets queued output
// drain (blocks on handle->condition until the callback thread signals),
// then stops each DirectSound buffer, zeros it so a restart does not
// replay stale audio, and rewinds the internal buffer pointers. Restores
// the normal scheduler period and reports a SYSTEM_ERROR if any
// DirectSound call failed.
6423 void RtApiDs :: stopStream()
6426 if ( stream_.state == STREAM_STOPPED ) {
6427 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6428 error( RtAudioError::WARNING );
6435 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6436 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain in progress: request one (2) and
// wait for the callback thread to signal completion.
6437 if ( handle->drainCounter == 0 ) {
6438 handle->drainCounter = 2;
6439 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6442 stream_.state = STREAM_STOPPED;
// Serialize against the callback thread while tearing down playback.
6444 MUTEX_LOCK( &stream_.mutex );
6446 // Stop the buffer and clear memory
6447 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6448 result = buffer->Stop();
6449 if ( FAILED( result ) ) {
6450 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6451 errorText_ = errorStream_.str();
6455 // Lock the buffer and clear it so that if we start to play again,
6456 // we won't have old data playing.
6457 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6458 if ( FAILED( result ) ) {
6459 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6460 errorText_ = errorStream_.str();
6464 // Zero the DS buffer
6465 ZeroMemory( audioPtr, dataLen );
6467 // Unlock the DS buffer
6468 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6469 if ( FAILED( result ) ) {
6470 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6471 errorText_ = errorStream_.str();
6475 // If we start playing again, we must begin at beginning of buffer.
6476 handle->bufferPointer[0] = 0;
6479 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6480 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6484 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch
// above; only lock here for a pure-input stream.
6486 if ( stream_.mode != DUPLEX )
6487 MUTEX_LOCK( &stream_.mutex );
6489 result = buffer->Stop();
6490 if ( FAILED( result ) ) {
6491 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6492 errorText_ = errorStream_.str();
6496 // Lock the buffer and clear it so that if we start to play again,
6497 // we won't have old data playing.
6498 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6499 if ( FAILED( result ) ) {
6500 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6501 errorText_ = errorStream_.str();
6505 // Zero the DS buffer
6506 ZeroMemory( audioPtr, dataLen );
6508 // Unlock the DS buffer
6509 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6510 if ( FAILED( result ) ) {
6511 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6512 errorText_ = errorStream_.str();
6516 // If we start recording again, we must begin at beginning of buffer.
6517 handle->bufferPointer[1] = 0;
6521 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6522 MUTEX_UNLOCK( &stream_.mutex );
6524 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort the running stream immediately: skip the normal output drain by
// forcing drainCounter to 2 (the "discard remaining output" state read by
// the callback thread / stopStream path). Issues only a WARNING if the
// stream is already stopped.
6527 void RtApiDs :: abortStream()
6530 if ( stream_.state == STREAM_STOPPED ) {
6531 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6532 error( RtAudioError::WARNING );
6536 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6537 handle->drainCounter = 2;
6542 void RtApiDs :: callbackEvent()
6544 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6545 Sleep( 50 ); // sleep 50 milliseconds
6549 if ( stream_.state == STREAM_CLOSED ) {
6550 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6551 error( RtAudioError::WARNING );
6555 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6556 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6558 // Check if we were draining the stream and signal is finished.
6559 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6561 stream_.state = STREAM_STOPPING;
6562 if ( handle->internalDrain == false )
6563 SetEvent( handle->condition );
6569 // Invoke user callback to get fresh output data UNLESS we are
6571 if ( handle->drainCounter == 0 ) {
6572 RtAudioCallback callback = (RtAudioCallback) info->callback;
6573 double streamTime = getStreamTime();
6574 RtAudioStreamStatus status = 0;
6575 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6576 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6577 handle->xrun[0] = false;
6579 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6580 status |= RTAUDIO_INPUT_OVERFLOW;
6581 handle->xrun[1] = false;
6583 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6584 stream_.bufferSize, streamTime, status, info->userData );
6585 if ( cbReturnValue == 2 ) {
6586 stream_.state = STREAM_STOPPING;
6587 handle->drainCounter = 2;
6591 else if ( cbReturnValue == 1 ) {
6592 handle->drainCounter = 1;
6593 handle->internalDrain = true;
6598 DWORD currentWritePointer, safeWritePointer;
6599 DWORD currentReadPointer, safeReadPointer;
6600 UINT nextWritePointer;
6602 LPVOID buffer1 = NULL;
6603 LPVOID buffer2 = NULL;
6604 DWORD bufferSize1 = 0;
6605 DWORD bufferSize2 = 0;
6610 MUTEX_LOCK( &stream_.mutex );
6611 if ( stream_.state == STREAM_STOPPED ) {
6612 MUTEX_UNLOCK( &stream_.mutex );
6616 if ( buffersRolling == false ) {
6617 if ( stream_.mode == DUPLEX ) {
6618 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6620 // It takes a while for the devices to get rolling. As a result,
6621 // there's no guarantee that the capture and write device pointers
6622 // will move in lockstep. Wait here for both devices to start
6623 // rolling, and then set our buffer pointers accordingly.
6624 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6625 // bytes later than the write buffer.
6627 // Stub: a serious risk of having a pre-emptive scheduling round
6628 // take place between the two GetCurrentPosition calls... but I'm
6629 // really not sure how to solve the problem. Temporarily boost to
6630 // Realtime priority, maybe; but I'm not sure what priority the
6631 // DirectSound service threads run at. We *should* be roughly
6632 // within a ms or so of correct.
6634 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6635 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6637 DWORD startSafeWritePointer, startSafeReadPointer;
6639 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6640 if ( FAILED( result ) ) {
6641 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6642 errorText_ = errorStream_.str();
6643 MUTEX_UNLOCK( &stream_.mutex );
6644 error( RtAudioError::SYSTEM_ERROR );
6647 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6648 if ( FAILED( result ) ) {
6649 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6650 errorText_ = errorStream_.str();
6651 MUTEX_UNLOCK( &stream_.mutex );
6652 error( RtAudioError::SYSTEM_ERROR );
6656 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6657 if ( FAILED( result ) ) {
6658 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6659 errorText_ = errorStream_.str();
6660 MUTEX_UNLOCK( &stream_.mutex );
6661 error( RtAudioError::SYSTEM_ERROR );
6664 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6665 if ( FAILED( result ) ) {
6666 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6667 errorText_ = errorStream_.str();
6668 MUTEX_UNLOCK( &stream_.mutex );
6669 error( RtAudioError::SYSTEM_ERROR );
6672 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6676 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6678 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6679 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6680 handle->bufferPointer[1] = safeReadPointer;
6682 else if ( stream_.mode == OUTPUT ) {
6684 // Set the proper nextWritePosition after initial startup.
6685 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6686 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6687 if ( FAILED( result ) ) {
6688 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6689 errorText_ = errorStream_.str();
6690 MUTEX_UNLOCK( &stream_.mutex );
6691 error( RtAudioError::SYSTEM_ERROR );
6694 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6695 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6698 buffersRolling = true;
6701 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6703 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6705 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6706 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6707 bufferBytes *= formatBytes( stream_.userFormat );
6708 memset( stream_.userBuffer[0], 0, bufferBytes );
6711 // Setup parameters and do buffer conversion if necessary.
6712 if ( stream_.doConvertBuffer[0] ) {
6713 buffer = stream_.deviceBuffer;
6714 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6715 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6716 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6719 buffer = stream_.userBuffer[0];
6720 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6721 bufferBytes *= formatBytes( stream_.userFormat );
6724 // No byte swapping necessary in DirectSound implementation.
6726 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6727 // unsigned. So, we need to convert our signed 8-bit data here to
6729 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6730 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6732 DWORD dsBufferSize = handle->dsBufferSize[0];
6733 nextWritePointer = handle->bufferPointer[0];
6735 DWORD endWrite, leadPointer;
6737 // Find out where the read and "safe write" pointers are.
6738 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6739 if ( FAILED( result ) ) {
6740 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6741 errorText_ = errorStream_.str();
6742 MUTEX_UNLOCK( &stream_.mutex );
6743 error( RtAudioError::SYSTEM_ERROR );
6747 // We will copy our output buffer into the region between
6748 // safeWritePointer and leadPointer. If leadPointer is not
6749 // beyond the next endWrite position, wait until it is.
6750 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6751 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6752 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6753 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6754 endWrite = nextWritePointer + bufferBytes;
6756 // Check whether the entire write region is behind the play pointer.
6757 if ( leadPointer >= endWrite ) break;
6759 // If we are here, then we must wait until the leadPointer advances
6760 // beyond the end of our next write region. We use the
6761 // Sleep() function to suspend operation until that happens.
6762 double millis = ( endWrite - leadPointer ) * 1000.0;
6763 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6764 if ( millis < 1.0 ) millis = 1.0;
6765 Sleep( (DWORD) millis );
6768 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6769 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6770 // We've strayed into the forbidden zone ... resync the read pointer.
6771 handle->xrun[0] = true;
6772 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6773 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6774 handle->bufferPointer[0] = nextWritePointer;
6775 endWrite = nextWritePointer + bufferBytes;
6778 // Lock free space in the buffer
6779 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6780 &bufferSize1, &buffer2, &bufferSize2, 0 );
6781 if ( FAILED( result ) ) {
6782 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6783 errorText_ = errorStream_.str();
6784 MUTEX_UNLOCK( &stream_.mutex );
6785 error( RtAudioError::SYSTEM_ERROR );
6789 // Copy our buffer into the DS buffer
6790 CopyMemory( buffer1, buffer, bufferSize1 );
6791 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6793 // Update our buffer offset and unlock sound buffer
6794 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6795 if ( FAILED( result ) ) {
6796 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6797 errorText_ = errorStream_.str();
6798 MUTEX_UNLOCK( &stream_.mutex );
6799 error( RtAudioError::SYSTEM_ERROR );
6802 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6803 handle->bufferPointer[0] = nextWritePointer;
6806 // Don't bother draining input
6807 if ( handle->drainCounter ) {
6808 handle->drainCounter++;
6812 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6814 // Setup parameters.
6815 if ( stream_.doConvertBuffer[1] ) {
6816 buffer = stream_.deviceBuffer;
6817 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6818 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6821 buffer = stream_.userBuffer[1];
6822 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6823 bufferBytes *= formatBytes( stream_.userFormat );
6826 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6827 long nextReadPointer = handle->bufferPointer[1];
6828 DWORD dsBufferSize = handle->dsBufferSize[1];
6830 // Find out where the write and "safe read" pointers are.
6831 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6832 if ( FAILED( result ) ) {
6833 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6834 errorText_ = errorStream_.str();
6835 MUTEX_UNLOCK( &stream_.mutex );
6836 error( RtAudioError::SYSTEM_ERROR );
6840 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6841 DWORD endRead = nextReadPointer + bufferBytes;
6843 // Handling depends on whether we are INPUT or DUPLEX.
6844 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6845 // then a wait here will drag the write pointers into the forbidden zone.
6847 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6848 // it's in a safe position. This causes dropouts, but it seems to be the only
6849 // practical way to sync up the read and write pointers reliably, given the
6850 // the very complex relationship between phase and increment of the read and write
6853 // In order to minimize audible dropouts in DUPLEX mode, we will
6854 // provide a pre-roll period of 0.5 seconds in which we return
6855 // zeros from the read buffer while the pointers sync up.
6857 if ( stream_.mode == DUPLEX ) {
6858 if ( safeReadPointer < endRead ) {
6859 if ( duplexPrerollBytes <= 0 ) {
6860 // Pre-roll time over. Be more agressive.
6861 int adjustment = endRead-safeReadPointer;
6863 handle->xrun[1] = true;
6865 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6866 // and perform fine adjustments later.
6867 // - small adjustments: back off by twice as much.
6868 if ( adjustment >= 2*bufferBytes )
6869 nextReadPointer = safeReadPointer-2*bufferBytes;
6871 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6873 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6877 // In pre=roll time. Just do it.
6878 nextReadPointer = safeReadPointer - bufferBytes;
6879 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6881 endRead = nextReadPointer + bufferBytes;
6884 else { // mode == INPUT
6885 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6886 // See comments for playback.
6887 double millis = (endRead - safeReadPointer) * 1000.0;
6888 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6889 if ( millis < 1.0 ) millis = 1.0;
6890 Sleep( (DWORD) millis );
6892 // Wake up and find out where we are now.
6893 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6894 if ( FAILED( result ) ) {
6895 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6896 errorText_ = errorStream_.str();
6897 MUTEX_UNLOCK( &stream_.mutex );
6898 error( RtAudioError::SYSTEM_ERROR );
6902 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6906 // Lock free space in the buffer
6907 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6908 &bufferSize1, &buffer2, &bufferSize2, 0 );
6909 if ( FAILED( result ) ) {
6910 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6911 errorText_ = errorStream_.str();
6912 MUTEX_UNLOCK( &stream_.mutex );
6913 error( RtAudioError::SYSTEM_ERROR );
6917 if ( duplexPrerollBytes <= 0 ) {
6918 // Copy our buffer into the DS buffer
6919 CopyMemory( buffer, buffer1, bufferSize1 );
6920 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6923 memset( buffer, 0, bufferSize1 );
6924 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6925 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6928 // Update our buffer offset and unlock sound buffer
6929 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6930 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6931 if ( FAILED( result ) ) {
6932 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6933 errorText_ = errorStream_.str();
6934 MUTEX_UNLOCK( &stream_.mutex );
6935 error( RtAudioError::SYSTEM_ERROR );
6938 handle->bufferPointer[1] = nextReadPointer;
6940 // No byte swapping necessary in DirectSound implementation.
6942 // If necessary, convert 8-bit data from unsigned to signed.
6943 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6944 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6946 // Do buffer conversion if necessary.
6947 if ( stream_.doConvertBuffer[1] )
6948 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6952 MUTEX_UNLOCK( &stream_.mutex );
6953 RtApi::tickStreamTime();
6956 // Definitions for utility functions and callbacks
6957 // specific to the DirectSound implementation.
6959 static unsigned __stdcall callbackHandler( void *ptr )
6961 CallbackInfo *info = (CallbackInfo *) ptr;
6962 RtApiDs *object = (RtApiDs *) info->object;
6963 bool* isRunning = &info->isRunning;
6965 while ( *isRunning == true ) {
6966 object->callbackEvent();
6973 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6974 LPCTSTR description,
6978 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6979 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6982 bool validDevice = false;
6983 if ( probeInfo.isInput == true ) {
6985 LPDIRECTSOUNDCAPTURE object;
6987 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6988 if ( hr != DS_OK ) return TRUE;
6990 caps.dwSize = sizeof(caps);
6991 hr = object->GetCaps( &caps );
6992 if ( hr == DS_OK ) {
6993 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7000 LPDIRECTSOUND object;
7001 hr = DirectSoundCreate( lpguid, &object, NULL );
7002 if ( hr != DS_OK ) return TRUE;
7004 caps.dwSize = sizeof(caps);
7005 hr = object->GetCaps( &caps );
7006 if ( hr == DS_OK ) {
7007 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7013 // If good device, then save its name and guid.
7014 std::string name = convertCharPointerToStdString( description );
7015 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7016 if ( lpguid == NULL )
7017 name = "Default Device";
7018 if ( validDevice ) {
7019 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7020 if ( dsDevices[i].name == name ) {
7021 dsDevices[i].found = true;
7022 if ( probeInfo.isInput ) {
7023 dsDevices[i].id[1] = lpguid;
7024 dsDevices[i].validId[1] = true;
7027 dsDevices[i].id[0] = lpguid;
7028 dsDevices[i].validId[0] = true;
7036 device.found = true;
7037 if ( probeInfo.isInput ) {
7038 device.id[1] = lpguid;
7039 device.validId[1] = true;
7042 device.id[0] = lpguid;
7043 device.validId[0] = true;
7045 dsDevices.push_back( device );
7051 static const char* getErrorString( int code )
7055 case DSERR_ALLOCATED:
7056 return "Already allocated";
7058 case DSERR_CONTROLUNAVAIL:
7059 return "Control unavailable";
7061 case DSERR_INVALIDPARAM:
7062 return "Invalid parameter";
7064 case DSERR_INVALIDCALL:
7065 return "Invalid call";
7068 return "Generic error";
7070 case DSERR_PRIOLEVELNEEDED:
7071 return "Priority level needed";
7073 case DSERR_OUTOFMEMORY:
7074 return "Out of memory";
7076 case DSERR_BADFORMAT:
7077 return "The sample rate or the channel format is not supported";
7079 case DSERR_UNSUPPORTED:
7080 return "Not supported";
7082 case DSERR_NODRIVER:
7085 case DSERR_ALREADYINITIALIZED:
7086 return "Already initialized";
7088 case DSERR_NOAGGREGATION:
7089 return "No aggregation";
7091 case DSERR_BUFFERLOST:
7092 return "Buffer lost";
7094 case DSERR_OTHERAPPHASPRIO:
7095 return "Another application already has priority";
7097 case DSERR_UNINITIALIZED:
7098 return "Uninitialized";
7101 return "DirectSound unknown error";
7104 //******************** End of __WINDOWS_DS__ *********************//
7108 #if defined(__LINUX_ALSA__)
7110 #include <alsa/asoundlib.h>
7113 // A structure to hold various information related to the ALSA API
7116 snd_pcm_t *handles[2];
7119 pthread_cond_t runnable_cv;
7123 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7126 static void *alsaCallbackHandler( void * ptr );
7128 RtApiAlsa :: RtApiAlsa()
7130 // Nothing to do here.
7133 RtApiAlsa :: ~RtApiAlsa()
7135 if ( stream_.state != STREAM_CLOSED ) closeStream();
7138 unsigned int RtApiAlsa :: getDeviceCount( void )
7140 unsigned nDevices = 0;
7141 int result, subdevice, card;
7145 // Count cards and devices
7147 snd_card_next( &card );
7148 while ( card >= 0 ) {
7149 sprintf( name, "hw:%d", card );
7150 result = snd_ctl_open( &handle, name, 0 );
7152 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7153 errorText_ = errorStream_.str();
7154 error( RtAudioError::WARNING );
7159 result = snd_ctl_pcm_next_device( handle, &subdevice );
7161 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7162 errorText_ = errorStream_.str();
7163 error( RtAudioError::WARNING );
7166 if ( subdevice < 0 )
7171 snd_ctl_close( handle );
7172 snd_card_next( &card );
7175 result = snd_ctl_open( &handle, "default", 0 );
7178 snd_ctl_close( handle );
7184 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7186 RtAudio::DeviceInfo info;
7187 info.probed = false;
7189 unsigned nDevices = 0;
7190 int result, subdevice, card;
7194 // Count cards and devices
7197 snd_card_next( &card );
7198 while ( card >= 0 ) {
7199 sprintf( name, "hw:%d", card );
7200 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7202 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7203 errorText_ = errorStream_.str();
7204 error( RtAudioError::WARNING );
7209 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7211 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7212 errorText_ = errorStream_.str();
7213 error( RtAudioError::WARNING );
7216 if ( subdevice < 0 ) break;
7217 if ( nDevices == device ) {
7218 sprintf( name, "hw:%d,%d", card, subdevice );
7224 snd_ctl_close( chandle );
7225 snd_card_next( &card );
7228 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7229 if ( result == 0 ) {
7230 if ( nDevices == device ) {
7231 strcpy( name, "default" );
7237 if ( nDevices == 0 ) {
7238 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7239 error( RtAudioError::INVALID_USE );
7243 if ( device >= nDevices ) {
7244 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7245 error( RtAudioError::INVALID_USE );
7251 // If a stream is already open, we cannot probe the stream devices.
7252 // Thus, use the saved results.
7253 if ( stream_.state != STREAM_CLOSED &&
7254 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7255 snd_ctl_close( chandle );
7256 if ( device >= devices_.size() ) {
7257 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7258 error( RtAudioError::WARNING );
7261 return devices_[ device ];
7264 int openMode = SND_PCM_ASYNC;
7265 snd_pcm_stream_t stream;
7266 snd_pcm_info_t *pcminfo;
7267 snd_pcm_info_alloca( &pcminfo );
7269 snd_pcm_hw_params_t *params;
7270 snd_pcm_hw_params_alloca( ¶ms );
7272 // First try for playback unless default device (which has subdev -1)
7273 stream = SND_PCM_STREAM_PLAYBACK;
7274 snd_pcm_info_set_stream( pcminfo, stream );
7275 if ( subdevice != -1 ) {
7276 snd_pcm_info_set_device( pcminfo, subdevice );
7277 snd_pcm_info_set_subdevice( pcminfo, 0 );
7279 result = snd_ctl_pcm_info( chandle, pcminfo );
7281 // Device probably doesn't support playback.
7286 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7288 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7289 errorText_ = errorStream_.str();
7290 error( RtAudioError::WARNING );
7294 // The device is open ... fill the parameter structure.
7295 result = snd_pcm_hw_params_any( phandle, params );
7297 snd_pcm_close( phandle );
7298 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7299 errorText_ = errorStream_.str();
7300 error( RtAudioError::WARNING );
7304 // Get output channel information.
7306 result = snd_pcm_hw_params_get_channels_max( params, &value );
7308 snd_pcm_close( phandle );
7309 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7310 errorText_ = errorStream_.str();
7311 error( RtAudioError::WARNING );
7314 info.outputChannels = value;
7315 snd_pcm_close( phandle );
7318 stream = SND_PCM_STREAM_CAPTURE;
7319 snd_pcm_info_set_stream( pcminfo, stream );
7321 // Now try for capture unless default device (with subdev = -1)
7322 if ( subdevice != -1 ) {
7323 result = snd_ctl_pcm_info( chandle, pcminfo );
7324 snd_ctl_close( chandle );
7326 // Device probably doesn't support capture.
7327 if ( info.outputChannels == 0 ) return info;
7328 goto probeParameters;
7332 snd_ctl_close( chandle );
7334 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7336 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7337 errorText_ = errorStream_.str();
7338 error( RtAudioError::WARNING );
7339 if ( info.outputChannels == 0 ) return info;
7340 goto probeParameters;
7343 // The device is open ... fill the parameter structure.
7344 result = snd_pcm_hw_params_any( phandle, params );
7346 snd_pcm_close( phandle );
7347 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7348 errorText_ = errorStream_.str();
7349 error( RtAudioError::WARNING );
7350 if ( info.outputChannels == 0 ) return info;
7351 goto probeParameters;
7354 result = snd_pcm_hw_params_get_channels_max( params, &value );
7356 snd_pcm_close( phandle );
7357 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7358 errorText_ = errorStream_.str();
7359 error( RtAudioError::WARNING );
7360 if ( info.outputChannels == 0 ) return info;
7361 goto probeParameters;
7363 info.inputChannels = value;
7364 snd_pcm_close( phandle );
7366 // If device opens for both playback and capture, we determine the channels.
7367 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7368 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7370 // ALSA doesn't provide default devices so we'll use the first available one.
7371 if ( device == 0 && info.outputChannels > 0 )
7372 info.isDefaultOutput = true;
7373 if ( device == 0 && info.inputChannels > 0 )
7374 info.isDefaultInput = true;
7377 // At this point, we just need to figure out the supported data
7378 // formats and sample rates. We'll proceed by opening the device in
7379 // the direction with the maximum number of channels, or playback if
7380 // they are equal. This might limit our sample rate options, but so
7383 if ( info.outputChannels >= info.inputChannels )
7384 stream = SND_PCM_STREAM_PLAYBACK;
7386 stream = SND_PCM_STREAM_CAPTURE;
7387 snd_pcm_info_set_stream( pcminfo, stream );
7389 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7391 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7392 errorText_ = errorStream_.str();
7393 error( RtAudioError::WARNING );
7397 // The device is open ... fill the parameter structure.
7398 result = snd_pcm_hw_params_any( phandle, params );
7400 snd_pcm_close( phandle );
7401 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7402 errorText_ = errorStream_.str();
7403 error( RtAudioError::WARNING );
7407 // Test our discrete set of sample rate values.
7408 info.sampleRates.clear();
7409 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7410 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7411 info.sampleRates.push_back( SAMPLE_RATES[i] );
7413 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7414 info.preferredSampleRate = SAMPLE_RATES[i];
7417 if ( info.sampleRates.size() == 0 ) {
7418 snd_pcm_close( phandle );
7419 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7420 errorText_ = errorStream_.str();
7421 error( RtAudioError::WARNING );
7425 // Probe the supported data formats ... we don't care about endian-ness just yet
7426 snd_pcm_format_t format;
7427 info.nativeFormats = 0;
7428 format = SND_PCM_FORMAT_S8;
7429 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7430 info.nativeFormats |= RTAUDIO_SINT8;
7431 format = SND_PCM_FORMAT_S16;
7432 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7433 info.nativeFormats |= RTAUDIO_SINT16;
7434 format = SND_PCM_FORMAT_S24;
7435 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7436 info.nativeFormats |= RTAUDIO_SINT24;
7437 format = SND_PCM_FORMAT_S32;
7438 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7439 info.nativeFormats |= RTAUDIO_SINT32;
7440 format = SND_PCM_FORMAT_FLOAT;
7441 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7442 info.nativeFormats |= RTAUDIO_FLOAT32;
7443 format = SND_PCM_FORMAT_FLOAT64;
7444 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7445 info.nativeFormats |= RTAUDIO_FLOAT64;
7447 // Check that we have at least one supported format
7448 if ( info.nativeFormats == 0 ) {
7449 snd_pcm_close( phandle );
7450 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7451 errorText_ = errorStream_.str();
7452 error( RtAudioError::WARNING );
7456 // Get the device name
7458 result = snd_card_get_name( card, &cardname );
7459 if ( result >= 0 ) {
7460 sprintf( name, "hw:%s,%d", cardname, subdevice );
7465 // That's all ... close the device and return
7466 snd_pcm_close( phandle );
7471 void RtApiAlsa :: saveDeviceInfo( void )
7475 unsigned int nDevices = getDeviceCount();
7476 devices_.resize( nDevices );
7477 for ( unsigned int i=0; i<nDevices; i++ )
7478 devices_[i] = getDeviceInfo( i );
7481 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7482 unsigned int firstChannel, unsigned int sampleRate,
7483 RtAudioFormat format, unsigned int *bufferSize,
7484 RtAudio::StreamOptions *options )
7487 #if defined(__RTAUDIO_DEBUG__)
7489 snd_output_stdio_attach(&out, stderr, 0);
7492 // I'm not using the "plug" interface ... too much inconsistent behavior.
7494 unsigned nDevices = 0;
7495 int result, subdevice, card;
7499 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7500 snprintf(name, sizeof(name), "%s", "default");
7502 // Count cards and devices
7504 snd_card_next( &card );
7505 while ( card >= 0 ) {
7506 sprintf( name, "hw:%d", card );
7507 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7509 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7510 errorText_ = errorStream_.str();
7515 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7516 if ( result < 0 ) break;
7517 if ( subdevice < 0 ) break;
7518 if ( nDevices == device ) {
7519 sprintf( name, "hw:%d,%d", card, subdevice );
7520 snd_ctl_close( chandle );
7525 snd_ctl_close( chandle );
7526 snd_card_next( &card );
7529 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7530 if ( result == 0 ) {
7531 if ( nDevices == device ) {
7532 strcpy( name, "default" );
7533 snd_ctl_close( chandle );
7538 snd_ctl_close( chandle );
7540 if ( nDevices == 0 ) {
7541 // This should not happen because a check is made before this function is called.
7542 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7546 if ( device >= nDevices ) {
7547 // This should not happen because a check is made before this function is called.
7548 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7555 // The getDeviceInfo() function will not work for a device that is
7556 // already open. Thus, we'll probe the system before opening a
7557 // stream and save the results for use by getDeviceInfo().
7558 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7559 this->saveDeviceInfo();
7561 snd_pcm_stream_t stream;
7562 if ( mode == OUTPUT )
7563 stream = SND_PCM_STREAM_PLAYBACK;
7565 stream = SND_PCM_STREAM_CAPTURE;
7568 int openMode = SND_PCM_ASYNC;
7569 result = snd_pcm_open( &phandle, name, stream, openMode );
7571 if ( mode == OUTPUT )
7572 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7574 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7575 errorText_ = errorStream_.str();
7579 // Fill the parameter structure.
7580 snd_pcm_hw_params_t *hw_params;
7581 snd_pcm_hw_params_alloca( &hw_params );
7582 result = snd_pcm_hw_params_any( phandle, hw_params );
7584 snd_pcm_close( phandle );
7585 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7586 errorText_ = errorStream_.str();
7590 #if defined(__RTAUDIO_DEBUG__)
7591 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7592 snd_pcm_hw_params_dump( hw_params, out );
7595 // Set access ... check user preference.
7596 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7597 stream_.userInterleaved = false;
7598 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7600 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7601 stream_.deviceInterleaved[mode] = true;
7604 stream_.deviceInterleaved[mode] = false;
7607 stream_.userInterleaved = true;
7608 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7610 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7611 stream_.deviceInterleaved[mode] = false;
7614 stream_.deviceInterleaved[mode] = true;
7618 snd_pcm_close( phandle );
7619 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7620 errorText_ = errorStream_.str();
7624 // Determine how to set the device format.
7625 stream_.userFormat = format;
7626 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7628 if ( format == RTAUDIO_SINT8 )
7629 deviceFormat = SND_PCM_FORMAT_S8;
7630 else if ( format == RTAUDIO_SINT16 )
7631 deviceFormat = SND_PCM_FORMAT_S16;
7632 else if ( format == RTAUDIO_SINT24 )
7633 deviceFormat = SND_PCM_FORMAT_S24;
7634 else if ( format == RTAUDIO_SINT32 )
7635 deviceFormat = SND_PCM_FORMAT_S32;
7636 else if ( format == RTAUDIO_FLOAT32 )
7637 deviceFormat = SND_PCM_FORMAT_FLOAT;
7638 else if ( format == RTAUDIO_FLOAT64 )
7639 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7641 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7642 stream_.deviceFormat[mode] = format;
7646 // The user requested format is not natively supported by the device.
7647 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7648 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7649 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7653 deviceFormat = SND_PCM_FORMAT_FLOAT;
7654 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7655 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7659 deviceFormat = SND_PCM_FORMAT_S32;
7660 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7661 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7665 deviceFormat = SND_PCM_FORMAT_S24;
7666 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7667 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7671 deviceFormat = SND_PCM_FORMAT_S16;
7672 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7673 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7677 deviceFormat = SND_PCM_FORMAT_S8;
7678 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7679 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7683 // If we get here, no supported format was found.
7684 snd_pcm_close( phandle );
7685 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7686 errorText_ = errorStream_.str();
7690 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7692 snd_pcm_close( phandle );
7693 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7694 errorText_ = errorStream_.str();
7698 // Determine whether byte-swaping is necessary.
7699 stream_.doByteSwap[mode] = false;
7700 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7701 result = snd_pcm_format_cpu_endian( deviceFormat );
7703 stream_.doByteSwap[mode] = true;
7704 else if (result < 0) {
7705 snd_pcm_close( phandle );
7706 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7707 errorText_ = errorStream_.str();
7712 // Set the sample rate.
7713 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7715 snd_pcm_close( phandle );
7716 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7717 errorText_ = errorStream_.str();
7721 // Determine the number of channels for this device. We support a possible
7722 // minimum device channel number > than the value requested by the user.
7723 stream_.nUserChannels[mode] = channels;
7725 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7726 unsigned int deviceChannels = value;
7727 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7728 snd_pcm_close( phandle );
7729 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7730 errorText_ = errorStream_.str();
7734 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7736 snd_pcm_close( phandle );
7737 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7738 errorText_ = errorStream_.str();
7741 deviceChannels = value;
7742 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7743 stream_.nDeviceChannels[mode] = deviceChannels;
7745 // Set the device channels.
7746 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7748 snd_pcm_close( phandle );
7749 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7750 errorText_ = errorStream_.str();
7754 // Set the buffer (or period) size.
7756 snd_pcm_uframes_t periodSize = *bufferSize;
7757 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7759 snd_pcm_close( phandle );
7760 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7761 errorText_ = errorStream_.str();
7764 *bufferSize = periodSize;
7766 // Set the buffer number, which in ALSA is referred to as the "period".
7767 unsigned int periods = 0;
7768 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7769 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7770 if ( periods < 2 ) periods = 4; // a fairly safe default value
7771 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7773 snd_pcm_close( phandle );
7774 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7775 errorText_ = errorStream_.str();
7779 // If attempting to setup a duplex stream, the bufferSize parameter
7780 // MUST be the same in both directions!
7781 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7782 snd_pcm_close( phandle );
7783 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7784 errorText_ = errorStream_.str();
7788 stream_.bufferSize = *bufferSize;
7790 // Install the hardware configuration
7791 result = snd_pcm_hw_params( phandle, hw_params );
7793 snd_pcm_close( phandle );
7794 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7795 errorText_ = errorStream_.str();
7799 #if defined(__RTAUDIO_DEBUG__)
7800 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7801 snd_pcm_hw_params_dump( hw_params, out );
7804 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7805 snd_pcm_sw_params_t *sw_params = NULL;
7806 snd_pcm_sw_params_alloca( &sw_params );
7807 snd_pcm_sw_params_current( phandle, sw_params );
7808 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7809 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7810 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7812 // The following two settings were suggested by Theo Veenker
7813 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7814 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7816 // here are two options for a fix
7817 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7818 snd_pcm_uframes_t val;
7819 snd_pcm_sw_params_get_boundary( sw_params, &val );
7820 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7822 result = snd_pcm_sw_params( phandle, sw_params );
7824 snd_pcm_close( phandle );
7825 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7826 errorText_ = errorStream_.str();
7830 #if defined(__RTAUDIO_DEBUG__)
7831 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7832 snd_pcm_sw_params_dump( sw_params, out );
7835 // Set flags for buffer conversion
7836 stream_.doConvertBuffer[mode] = false;
7837 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7838 stream_.doConvertBuffer[mode] = true;
7839 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7840 stream_.doConvertBuffer[mode] = true;
7841 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7842 stream_.nUserChannels[mode] > 1 )
7843 stream_.doConvertBuffer[mode] = true;
7845 // Allocate the ApiHandle if necessary and then save.
7846 AlsaHandle *apiInfo = 0;
7847 if ( stream_.apiHandle == 0 ) {
7849 apiInfo = (AlsaHandle *) new AlsaHandle;
7851 catch ( std::bad_alloc& ) {
7852 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7856 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7857 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7861 stream_.apiHandle = (void *) apiInfo;
7862 apiInfo->handles[0] = 0;
7863 apiInfo->handles[1] = 0;
7866 apiInfo = (AlsaHandle *) stream_.apiHandle;
7868 apiInfo->handles[mode] = phandle;
7871 // Allocate necessary internal buffers.
7872 unsigned long bufferBytes;
7873 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7874 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7875 if ( stream_.userBuffer[mode] == NULL ) {
7876 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7880 if ( stream_.doConvertBuffer[mode] ) {
7882 bool makeBuffer = true;
7883 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7884 if ( mode == INPUT ) {
7885 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7886 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7887 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7892 bufferBytes *= *bufferSize;
7893 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7894 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7895 if ( stream_.deviceBuffer == NULL ) {
7896 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7902 stream_.sampleRate = sampleRate;
7903 stream_.nBuffers = periods;
7904 stream_.device[mode] = device;
7905 stream_.state = STREAM_STOPPED;
7907 // Setup the buffer conversion information structure.
7908 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7910 // Setup thread if necessary.
7911 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7912 // We had already set up an output stream.
7913 stream_.mode = DUPLEX;
7914 // Link the streams if possible.
7915 apiInfo->synchronized = false;
7916 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7917 apiInfo->synchronized = true;
7919 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7920 error( RtAudioError::WARNING );
7924 stream_.mode = mode;
7926 // Setup callback thread.
7927 stream_.callbackInfo.object = (void *) this;
7929 // Set the thread attributes for joinable and realtime scheduling
7930 // priority (optional). The higher priority will only take affect
7931 // if the program is run as root or suid. Note, under Linux
7932 // processes with CAP_SYS_NICE privilege, a user can change
7933 // scheduling policy and priority (thus need not be root). See
7934 // POSIX "capabilities".
7935 pthread_attr_t attr;
7936 pthread_attr_init( &attr );
7937 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7938 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7939 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7940 stream_.callbackInfo.doRealtime = true;
7941 struct sched_param param;
7942 int priority = options->priority;
7943 int min = sched_get_priority_min( SCHED_RR );
7944 int max = sched_get_priority_max( SCHED_RR );
7945 if ( priority < min ) priority = min;
7946 else if ( priority > max ) priority = max;
7947 param.sched_priority = priority;
7949 // Set the policy BEFORE the priority. Otherwise it fails.
7950 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7951 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7952 // This is definitely required. Otherwise it fails.
7953 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7954 pthread_attr_setschedparam(&attr, ¶m);
7957 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7959 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7962 stream_.callbackInfo.isRunning = true;
7963 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7964 pthread_attr_destroy( &attr );
7966 // Failed. Try instead with default attributes.
7967 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7969 stream_.callbackInfo.isRunning = false;
7970 errorText_ = "RtApiAlsa::error creating callback thread!";
7980 pthread_cond_destroy( &apiInfo->runnable_cv );
7981 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7982 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7984 stream_.apiHandle = 0;
7987 if ( phandle) snd_pcm_close( phandle );
7989 for ( int i=0; i<2; i++ ) {
7990 if ( stream_.userBuffer[i] ) {
7991 free( stream_.userBuffer[i] );
7992 stream_.userBuffer[i] = 0;
7996 if ( stream_.deviceBuffer ) {
7997 free( stream_.deviceBuffer );
7998 stream_.deviceBuffer = 0;
8001 stream_.state = STREAM_CLOSED;
8005 void RtApiAlsa :: closeStream()
8007 if ( stream_.state == STREAM_CLOSED ) {
8008 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8009 error( RtAudioError::WARNING );
8013 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8014 stream_.callbackInfo.isRunning = false;
8015 MUTEX_LOCK( &stream_.mutex );
8016 if ( stream_.state == STREAM_STOPPED ) {
8017 apiInfo->runnable = true;
8018 pthread_cond_signal( &apiInfo->runnable_cv );
8020 MUTEX_UNLOCK( &stream_.mutex );
8021 pthread_join( stream_.callbackInfo.thread, NULL );
8023 if ( stream_.state == STREAM_RUNNING ) {
8024 stream_.state = STREAM_STOPPED;
8025 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8026 snd_pcm_drop( apiInfo->handles[0] );
8027 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8028 snd_pcm_drop( apiInfo->handles[1] );
8032 pthread_cond_destroy( &apiInfo->runnable_cv );
8033 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8034 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8036 stream_.apiHandle = 0;
8039 for ( int i=0; i<2; i++ ) {
8040 if ( stream_.userBuffer[i] ) {
8041 free( stream_.userBuffer[i] );
8042 stream_.userBuffer[i] = 0;
8046 if ( stream_.deviceBuffer ) {
8047 free( stream_.deviceBuffer );
8048 stream_.deviceBuffer = 0;
8051 stream_.mode = UNINITIALIZED;
8052 stream_.state = STREAM_CLOSED;
8055 void RtApiAlsa :: startStream()
8057 // This method calls snd_pcm_prepare if the device isn't already in that state.
8060 if ( stream_.state == STREAM_RUNNING ) {
8061 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8062 error( RtAudioError::WARNING );
8066 MUTEX_LOCK( &stream_.mutex );
8068 #if defined( HAVE_GETTIMEOFDAY )
8069 gettimeofday( &stream_.lastTickTimestamp, NULL );
8073 snd_pcm_state_t state;
8074 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8075 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8076 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8077 state = snd_pcm_state( handle[0] );
8078 if ( state != SND_PCM_STATE_PREPARED ) {
8079 result = snd_pcm_prepare( handle[0] );
8081 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8082 errorText_ = errorStream_.str();
8088 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8089 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8090 state = snd_pcm_state( handle[1] );
8091 if ( state != SND_PCM_STATE_PREPARED ) {
8092 result = snd_pcm_prepare( handle[1] );
8094 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8095 errorText_ = errorStream_.str();
8101 stream_.state = STREAM_RUNNING;
8104 apiInfo->runnable = true;
8105 pthread_cond_signal( &apiInfo->runnable_cv );
8106 MUTEX_UNLOCK( &stream_.mutex );
8108 if ( result >= 0 ) return;
8109 error( RtAudioError::SYSTEM_ERROR );
8112 void RtApiAlsa :: stopStream()
8115 if ( stream_.state == STREAM_STOPPED ) {
8116 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8117 error( RtAudioError::WARNING );
8121 stream_.state = STREAM_STOPPED;
8122 MUTEX_LOCK( &stream_.mutex );
8125 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8126 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8127 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8128 if ( apiInfo->synchronized )
8129 result = snd_pcm_drop( handle[0] );
8131 result = snd_pcm_drain( handle[0] );
8133 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8134 errorText_ = errorStream_.str();
8139 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8140 result = snd_pcm_drop( handle[1] );
8142 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8143 errorText_ = errorStream_.str();
8149 apiInfo->runnable = false; // fixes high CPU usage when stopped
8150 MUTEX_UNLOCK( &stream_.mutex );
8152 if ( result >= 0 ) return;
8153 error( RtAudioError::SYSTEM_ERROR );
8156 void RtApiAlsa :: abortStream()
8159 if ( stream_.state == STREAM_STOPPED ) {
8160 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8161 error( RtAudioError::WARNING );
8165 stream_.state = STREAM_STOPPED;
8166 MUTEX_LOCK( &stream_.mutex );
8169 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8170 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8171 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8172 result = snd_pcm_drop( handle[0] );
8174 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8175 errorText_ = errorStream_.str();
8180 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8181 result = snd_pcm_drop( handle[1] );
8183 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8184 errorText_ = errorStream_.str();
8190 apiInfo->runnable = false; // fixes high CPU usage when stopped
8191 MUTEX_UNLOCK( &stream_.mutex );
8193 if ( result >= 0 ) return;
8194 error( RtAudioError::SYSTEM_ERROR );
8197 void RtApiAlsa :: callbackEvent()
8199 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8200 if ( stream_.state == STREAM_STOPPED ) {
8201 MUTEX_LOCK( &stream_.mutex );
8202 while ( !apiInfo->runnable )
8203 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8205 if ( stream_.state != STREAM_RUNNING ) {
8206 MUTEX_UNLOCK( &stream_.mutex );
8209 MUTEX_UNLOCK( &stream_.mutex );
8212 if ( stream_.state == STREAM_CLOSED ) {
8213 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8214 error( RtAudioError::WARNING );
8218 int doStopStream = 0;
8219 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8220 double streamTime = getStreamTime();
8221 RtAudioStreamStatus status = 0;
8222 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8223 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8224 apiInfo->xrun[0] = false;
8226 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8227 status |= RTAUDIO_INPUT_OVERFLOW;
8228 apiInfo->xrun[1] = false;
8230 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8231 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8233 if ( doStopStream == 2 ) {
8238 MUTEX_LOCK( &stream_.mutex );
8240 // The state might change while waiting on a mutex.
8241 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8247 snd_pcm_sframes_t frames;
8248 RtAudioFormat format;
8249 handle = (snd_pcm_t **) apiInfo->handles;
8251 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8253 // Setup parameters.
8254 if ( stream_.doConvertBuffer[1] ) {
8255 buffer = stream_.deviceBuffer;
8256 channels = stream_.nDeviceChannels[1];
8257 format = stream_.deviceFormat[1];
8260 buffer = stream_.userBuffer[1];
8261 channels = stream_.nUserChannels[1];
8262 format = stream_.userFormat;
8265 // Read samples from device in interleaved/non-interleaved format.
8266 if ( stream_.deviceInterleaved[1] )
8267 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8269 void *bufs[channels];
8270 size_t offset = stream_.bufferSize * formatBytes( format );
8271 for ( int i=0; i<channels; i++ )
8272 bufs[i] = (void *) (buffer + (i * offset));
8273 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8276 if ( result < (int) stream_.bufferSize ) {
8277 // Either an error or overrun occured.
8278 if ( result == -EPIPE ) {
8279 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8280 if ( state == SND_PCM_STATE_XRUN ) {
8281 apiInfo->xrun[1] = true;
8282 result = snd_pcm_prepare( handle[1] );
8284 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8285 errorText_ = errorStream_.str();
8289 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8290 errorText_ = errorStream_.str();
8294 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8295 errorText_ = errorStream_.str();
8297 error( RtAudioError::WARNING );
8301 // Do byte swapping if necessary.
8302 if ( stream_.doByteSwap[1] )
8303 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8305 // Do buffer conversion if necessary.
8306 if ( stream_.doConvertBuffer[1] )
8307 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8309 // Check stream latency
8310 result = snd_pcm_delay( handle[1], &frames );
8311 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
8316 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8318 // Setup parameters and do buffer conversion if necessary.
8319 if ( stream_.doConvertBuffer[0] ) {
8320 buffer = stream_.deviceBuffer;
8321 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8322 channels = stream_.nDeviceChannels[0];
8323 format = stream_.deviceFormat[0];
8326 buffer = stream_.userBuffer[0];
8327 channels = stream_.nUserChannels[0];
8328 format = stream_.userFormat;
8331 // Do byte swapping if necessary.
8332 if ( stream_.doByteSwap[0] )
8333 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8335 // Write samples to device in interleaved/non-interleaved format.
8336 if ( stream_.deviceInterleaved[0] )
8337 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8339 void *bufs[channels];
8340 size_t offset = stream_.bufferSize * formatBytes( format );
8341 for ( int i=0; i<channels; i++ )
8342 bufs[i] = (void *) (buffer + (i * offset));
8343 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8346 if ( result < (int) stream_.bufferSize ) {
8347 // Either an error or underrun occured.
8348 if ( result == -EPIPE ) {
8349 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8350 if ( state == SND_PCM_STATE_XRUN ) {
8351 apiInfo->xrun[0] = true;
8352 result = snd_pcm_prepare( handle[0] );
8354 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8355 errorText_ = errorStream_.str();
8358 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8361 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8362 errorText_ = errorStream_.str();
8366 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8367 errorText_ = errorStream_.str();
8369 error( RtAudioError::WARNING );
8373 // Check stream latency
8374 result = snd_pcm_delay( handle[0], &frames );
8375 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8379 MUTEX_UNLOCK( &stream_.mutex );
8381 RtApi::tickStreamTime();
8382 if ( doStopStream == 1 ) this->stopStream();
8385 static void *alsaCallbackHandler( void *ptr )
8387 CallbackInfo *info = (CallbackInfo *) ptr;
8388 RtApiAlsa *object = (RtApiAlsa *) info->object;
8389 bool *isRunning = &info->isRunning;
8391 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8392 if ( info->doRealtime ) {
8393 std::cerr << "RtAudio alsa: " <<
8394 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8395 "running realtime scheduling" << std::endl;
8399 while ( *isRunning == true ) {
8400 pthread_testcancel();
8401 object->callbackEvent();
8404 pthread_exit( NULL );
8407 //******************** End of __LINUX_ALSA__ *********************//
8410 #if defined(__LINUX_PULSE__)
8412 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8413 // and Tristan Matthews.
8415 #include <pulse/error.h>
8416 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; the list is
// zero-terminated so callers can iterate without a length constant.
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
8422 struct rtaudio_pa_format_mapping_t {
8423 RtAudioFormat rtaudio_format;
8424 pa_sample_format_t pa_format;
8427 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8428 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8429 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8430 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8431 {0, PA_SAMPLE_INVALID}};
8433 struct PulseAudioHandle {
8437 pthread_cond_t runnable_cv;
8439 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8442 RtApiPulse::~RtApiPulse()
8444 if ( stream_.state != STREAM_CLOSED )
8448 unsigned int RtApiPulse::getDeviceCount( void )
8453 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8455 RtAudio::DeviceInfo info;
8457 info.name = "PulseAudio";
8458 info.outputChannels = 2;
8459 info.inputChannels = 2;
8460 info.duplexChannels = 2;
8461 info.isDefaultOutput = true;
8462 info.isDefaultInput = true;
8464 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8465 info.sampleRates.push_back( *sr );
8467 info.preferredSampleRate = 48000;
8468 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8473 static void *pulseaudio_callback( void * user )
8475 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8476 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8477 volatile bool *isRunning = &cbi->isRunning;
8479 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8480 if (cbi->doRealtime) {
8481 std::cerr << "RtAudio pulse: " <<
8482 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8483 "running realtime scheduling" << std::endl;
8487 while ( *isRunning ) {
8488 pthread_testcancel();
8489 context->callbackEvent();
8492 pthread_exit( NULL );
8495 void RtApiPulse::closeStream( void )
8497 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8499 stream_.callbackInfo.isRunning = false;
8501 MUTEX_LOCK( &stream_.mutex );
8502 if ( stream_.state == STREAM_STOPPED ) {
8503 pah->runnable = true;
8504 pthread_cond_signal( &pah->runnable_cv );
8506 MUTEX_UNLOCK( &stream_.mutex );
8508 pthread_join( pah->thread, 0 );
8509 if ( pah->s_play ) {
8510 pa_simple_flush( pah->s_play, NULL );
8511 pa_simple_free( pah->s_play );
8514 pa_simple_free( pah->s_rec );
8516 pthread_cond_destroy( &pah->runnable_cv );
8518 stream_.apiHandle = 0;
8521 if ( stream_.userBuffer[0] ) {
8522 free( stream_.userBuffer[0] );
8523 stream_.userBuffer[0] = 0;
8525 if ( stream_.userBuffer[1] ) {
8526 free( stream_.userBuffer[1] );
8527 stream_.userBuffer[1] = 0;
8530 stream_.state = STREAM_CLOSED;
8531 stream_.mode = UNINITIALIZED;
8534 void RtApiPulse::callbackEvent( void )
8536 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8538 if ( stream_.state == STREAM_STOPPED ) {
8539 MUTEX_LOCK( &stream_.mutex );
8540 while ( !pah->runnable )
8541 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8543 if ( stream_.state != STREAM_RUNNING ) {
8544 MUTEX_UNLOCK( &stream_.mutex );
8547 MUTEX_UNLOCK( &stream_.mutex );
8550 if ( stream_.state == STREAM_CLOSED ) {
8551 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8552 "this shouldn't happen!";
8553 error( RtAudioError::WARNING );
8557 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8558 double streamTime = getStreamTime();
8559 RtAudioStreamStatus status = 0;
8560 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8561 stream_.bufferSize, streamTime, status,
8562 stream_.callbackInfo.userData );
8564 if ( doStopStream == 2 ) {
8569 MUTEX_LOCK( &stream_.mutex );
8570 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8571 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8573 if ( stream_.state != STREAM_RUNNING )
8578 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8579 if ( stream_.doConvertBuffer[OUTPUT] ) {
8580 convertBuffer( stream_.deviceBuffer,
8581 stream_.userBuffer[OUTPUT],
8582 stream_.convertInfo[OUTPUT] );
8583 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8584 formatBytes( stream_.deviceFormat[OUTPUT] );
8586 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8587 formatBytes( stream_.userFormat );
8589 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8590 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8591 pa_strerror( pa_error ) << ".";
8592 errorText_ = errorStream_.str();
8593 error( RtAudioError::WARNING );
8597 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8598 if ( stream_.doConvertBuffer[INPUT] )
8599 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8600 formatBytes( stream_.deviceFormat[INPUT] );
8602 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8603 formatBytes( stream_.userFormat );
8605 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8606 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8607 pa_strerror( pa_error ) << ".";
8608 errorText_ = errorStream_.str();
8609 error( RtAudioError::WARNING );
8611 if ( stream_.doConvertBuffer[INPUT] ) {
8612 convertBuffer( stream_.userBuffer[INPUT],
8613 stream_.deviceBuffer,
8614 stream_.convertInfo[INPUT] );
8619 MUTEX_UNLOCK( &stream_.mutex );
8620 RtApi::tickStreamTime();
8622 if ( doStopStream == 1 )
8626 void RtApiPulse::startStream( void )
8628 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8630 if ( stream_.state == STREAM_CLOSED ) {
8631 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8632 error( RtAudioError::INVALID_USE );
8635 if ( stream_.state == STREAM_RUNNING ) {
8636 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8637 error( RtAudioError::WARNING );
8641 MUTEX_LOCK( &stream_.mutex );
8643 #if defined( HAVE_GETTIMEOFDAY )
8644 gettimeofday( &stream_.lastTickTimestamp, NULL );
8647 stream_.state = STREAM_RUNNING;
8649 pah->runnable = true;
8650 pthread_cond_signal( &pah->runnable_cv );
8651 MUTEX_UNLOCK( &stream_.mutex );
8654 void RtApiPulse::stopStream( void )
8656 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8658 if ( stream_.state == STREAM_CLOSED ) {
8659 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8660 error( RtAudioError::INVALID_USE );
8663 if ( stream_.state == STREAM_STOPPED ) {
8664 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8665 error( RtAudioError::WARNING );
8669 stream_.state = STREAM_STOPPED;
8670 MUTEX_LOCK( &stream_.mutex );
8672 if ( pah && pah->s_play ) {
8674 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8675 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8676 pa_strerror( pa_error ) << ".";
8677 errorText_ = errorStream_.str();
8678 MUTEX_UNLOCK( &stream_.mutex );
8679 error( RtAudioError::SYSTEM_ERROR );
8684 stream_.state = STREAM_STOPPED;
8685 MUTEX_UNLOCK( &stream_.mutex );
8688 void RtApiPulse::abortStream( void )
8690 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8692 if ( stream_.state == STREAM_CLOSED ) {
8693 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8694 error( RtAudioError::INVALID_USE );
8697 if ( stream_.state == STREAM_STOPPED ) {
8698 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8699 error( RtAudioError::WARNING );
8703 stream_.state = STREAM_STOPPED;
8704 MUTEX_LOCK( &stream_.mutex );
8706 if ( pah && pah->s_play ) {
8708 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8709 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8710 pa_strerror( pa_error ) << ".";
8711 errorText_ = errorStream_.str();
8712 MUTEX_UNLOCK( &stream_.mutex );
8713 error( RtAudioError::SYSTEM_ERROR );
8718 stream_.state = STREAM_STOPPED;
8719 MUTEX_UNLOCK( &stream_.mutex );
8722 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8723 unsigned int channels, unsigned int firstChannel,
8724 unsigned int sampleRate, RtAudioFormat format,
8725 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8727 PulseAudioHandle *pah = 0;
8728 unsigned long bufferBytes = 0;
8731 if ( device != 0 ) return false;
8732 if ( mode != INPUT && mode != OUTPUT ) return false;
8733 if ( channels != 1 && channels != 2 ) {
8734 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8737 ss.channels = channels;
8739 if ( firstChannel != 0 ) return false;
8741 bool sr_found = false;
8742 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8743 if ( sampleRate == *sr ) {
8745 stream_.sampleRate = sampleRate;
8746 ss.rate = sampleRate;
8751 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8756 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8757 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8758 if ( format == sf->rtaudio_format ) {
8760 stream_.userFormat = sf->rtaudio_format;
8761 stream_.deviceFormat[mode] = stream_.userFormat;
8762 ss.format = sf->pa_format;
8766 if ( !sf_found ) { // Use internal data format conversion.
8767 stream_.userFormat = format;
8768 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8769 ss.format = PA_SAMPLE_FLOAT32LE;
8772 // Set other stream parameters.
8773 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8774 else stream_.userInterleaved = true;
8775 stream_.deviceInterleaved[mode] = true;
8776 stream_.nBuffers = 1;
8777 stream_.doByteSwap[mode] = false;
8778 stream_.nUserChannels[mode] = channels;
8779 stream_.nDeviceChannels[mode] = channels + firstChannel;
8780 stream_.channelOffset[mode] = 0;
8781 std::string streamName = "RtAudio";
8783 // Set flags for buffer conversion.
8784 stream_.doConvertBuffer[mode] = false;
8785 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8786 stream_.doConvertBuffer[mode] = true;
8787 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8788 stream_.doConvertBuffer[mode] = true;
8790 // Allocate necessary internal buffers.
8791 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8792 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8793 if ( stream_.userBuffer[mode] == NULL ) {
8794 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8797 stream_.bufferSize = *bufferSize;
8799 if ( stream_.doConvertBuffer[mode] ) {
8801 bool makeBuffer = true;
8802 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8803 if ( mode == INPUT ) {
8804 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8805 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8806 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8811 bufferBytes *= *bufferSize;
8812 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8813 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8814 if ( stream_.deviceBuffer == NULL ) {
8815 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8821 stream_.device[mode] = device;
8823 // Setup the buffer conversion information structure.
8824 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8826 if ( !stream_.apiHandle ) {
8827 PulseAudioHandle *pah = new PulseAudioHandle;
8829 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8833 stream_.apiHandle = pah;
8834 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8835 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8839 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8842 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8845 pa_buffer_attr buffer_attr;
8846 buffer_attr.fragsize = bufferBytes;
8847 buffer_attr.maxlength = -1;
8849 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8850 if ( !pah->s_rec ) {
8851 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8856 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8857 if ( !pah->s_play ) {
8858 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8866 if ( stream_.mode == UNINITIALIZED )
8867 stream_.mode = mode;
8868 else if ( stream_.mode == mode )
8871 stream_.mode = DUPLEX;
8873 if ( !stream_.callbackInfo.isRunning ) {
8874 stream_.callbackInfo.object = this;
8876 stream_.state = STREAM_STOPPED;
8877 // Set the thread attributes for joinable and realtime scheduling
8878 // priority (optional). The higher priority will only take affect
8879 // if the program is run as root or suid. Note, under Linux
8880 // processes with CAP_SYS_NICE privilege, a user can change
8881 // scheduling policy and priority (thus need not be root). See
8882 // POSIX "capabilities".
8883 pthread_attr_t attr;
8884 pthread_attr_init( &attr );
8885 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8886 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8887 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8888 stream_.callbackInfo.doRealtime = true;
8889 struct sched_param param;
8890 int priority = options->priority;
8891 int min = sched_get_priority_min( SCHED_RR );
8892 int max = sched_get_priority_max( SCHED_RR );
8893 if ( priority < min ) priority = min;
8894 else if ( priority > max ) priority = max;
8895 param.sched_priority = priority;
8897 // Set the policy BEFORE the priority. Otherwise it fails.
8898 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8899 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8900 // This is definitely required. Otherwise it fails.
8901 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8902 pthread_attr_setschedparam(&attr, ¶m);
8905 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8907 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8910 stream_.callbackInfo.isRunning = true;
8911 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8912 pthread_attr_destroy(&attr);
8914 // Failed. Try instead with default attributes.
8915 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8917 stream_.callbackInfo.isRunning = false;
8918 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8927 if ( pah && stream_.callbackInfo.isRunning ) {
8928 pthread_cond_destroy( &pah->runnable_cv );
8930 stream_.apiHandle = 0;
8933 for ( int i=0; i<2; i++ ) {
8934 if ( stream_.userBuffer[i] ) {
8935 free( stream_.userBuffer[i] );
8936 stream_.userBuffer[i] = 0;
8940 if ( stream_.deviceBuffer ) {
8941 free( stream_.deviceBuffer );
8942 stream_.deviceBuffer = 0;
8945 stream_.state = STREAM_CLOSED;
8949 //******************** End of __LINUX_PULSE__ *********************//
8952 #if defined(__LINUX_OSS__)
8955 #include <sys/ioctl.h>
8958 #include <sys/soundcard.h>
8962 static void *ossCallbackHandler(void * ptr);
8964 // A structure to hold various information related to the OSS API
// implementation.
// NOTE(review): the `struct OssHandle {` header line and some member
// declarations (the `triggered` flag and `xrun[2]` array referenced by the
// constructor below) are elided from this excerpt of the file.
// Playback ([0]) and capture ([1]) device file descriptors.
8967 int id[2]; // device ids
// Condition variable used to park the callback thread while the stream is
// stopped; signalled from startStream() and closeStream().
8970 pthread_cond_t runnable;
// Constructor initializer list/body: duplex trigger not yet fired, no
// devices open, no xrun (under/overflow) flags pending.
8973 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor. All stream bookkeeping is initialized by the RtApi
// base class, so there is no OSS-specific setup to perform here.
8976 RtApiOss :: RtApiOss()
8978 // Nothing to do here.
// Destructor. Ensure any open stream is shut down (callback thread joined,
// device fds closed, buffers freed) before the object is destroyed.
8981 RtApiOss :: ~RtApiOss()
8983 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Return the number of OSS audio devices reported by the system mixer.
// Emits a WARNING when /dev/mixer cannot be opened or when the installed
// OSS version predates the v4 ioctl API (the early-return and
// close(mixerfd) lines on those paths are elided from this excerpt).
8986 unsigned int RtApiOss :: getDeviceCount( void )
8988 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8989 if ( mixerfd == -1 ) {
8990 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8991 error( RtAudioError::WARNING );
// SNDCTL_SYSINFO is an OSS v4-only ioctl; failure implies an older OSS.
8995 oss_sysinfo sysinfo;
8996 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8998 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8999 error( RtAudioError::WARNING );
9004 return sysinfo.numaudios;
// Probe a single OSS device (by zero-based index) and fill in an
// RtAudio::DeviceInfo structure: channel counts, native data formats, and
// supported sample rates. Failure paths issue a WARNING (or INVALID_USE
// for bad arguments) and return the partially-filled info; the early
// return / close(mixerfd) lines on those paths are elided in this excerpt.
9007 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9009 RtAudio::DeviceInfo info;
9010 info.probed = false;
9012 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9013 if ( mixerfd == -1 ) {
9014 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9015 error( RtAudioError::WARNING );
// Verify OSS v4 is present and get the device count for range checking.
9019 oss_sysinfo sysinfo;
9020 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9021 if ( result == -1 ) {
9023 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9024 error( RtAudioError::WARNING );
9028 unsigned nDevices = sysinfo.numaudios;
9029 if ( nDevices == 0 ) {
9031 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9032 error( RtAudioError::INVALID_USE );
9036 if ( device >= nDevices ) {
9038 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9039 error( RtAudioError::INVALID_USE );
// Query per-device capabilities (the line setting ainfo.dev to the device
// index before this ioctl is elided here).
9043 oss_audioinfo ainfo;
9045 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9047 if ( result == -1 ) {
9048 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9049 errorText_ = errorStream_.str();
9050 error( RtAudioError::WARNING );
// Channel capabilities. Duplex channel count is the smaller of the two
// directions. NOTE(review): the inner PCM_CAP_DUPLEX re-test is redundant
// with the enclosing if — harmless, but a candidate for cleanup upstream.
9055 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9056 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9057 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9058 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9059 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Translate the OSS input format mask into RtAudio's format flags.
9062 // Probe data formats ... do for input
9063 unsigned long mask = ainfo.iformats;
9064 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9065 info.nativeFormats |= RTAUDIO_SINT16;
9066 if ( mask & AFMT_S8 )
9067 info.nativeFormats |= RTAUDIO_SINT8;
9068 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9069 info.nativeFormats |= RTAUDIO_SINT32;
9071 if ( mask & AFMT_FLOAT )
9072 info.nativeFormats |= RTAUDIO_FLOAT32;
9074 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9075 info.nativeFormats |= RTAUDIO_SINT24;
9077 // Check that we have at least one supported format
9078 if ( info.nativeFormats == 0 ) {
9079 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9080 errorText_ = errorStream_.str();
9081 error( RtAudioError::WARNING );
// If the driver enumerates discrete rates, keep those that match RtAudio's
// SAMPLE_RATES table; otherwise fall back to the driver's min/max range.
// Preferred rate: the highest supported rate that does not exceed 48 kHz
// (or the first one found, if none are <= 48 kHz).
9085 // Probe the supported sample rates.
9086 info.sampleRates.clear();
9087 if ( ainfo.nrates ) {
9088 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9089 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9090 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9091 info.sampleRates.push_back( SAMPLE_RATES[k] );
9093 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9094 info.preferredSampleRate = SAMPLE_RATES[k];
9102 // Check min and max rate values;
9103 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9104 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9105 info.sampleRates.push_back( SAMPLE_RATES[k] );
9107 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9108 info.preferredSampleRate = SAMPLE_RATES[k];
9113 if ( info.sampleRates.size() == 0 ) {
9114 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9115 errorText_ = errorStream_.str();
9116 error( RtAudioError::WARNING );
// Success path: record the device name (the `info.probed = true` line and
// final return are elided in this excerpt).
9120 info.name = ainfo.name;
// Open and configure one direction (OUTPUT or INPUT) of an OSS stream:
// validate the device, open its /dev node, negotiate channels / sample
// format / fragment size / sample rate, allocate user and (if converting)
// device buffers, and spin up the callback thread on the first open.
// Returns SUCCESS/FAILURE (the return statements and the shared `error`
// cleanup label are elided from this excerpt, as are several braces,
// `goto error` lines and `close(...)` calls on the failure paths).
//
// Fixes applied in this revision (no other visible token changed):
//  * "¶m" was mojibake for "&param" in pthread_attr_setschedparam —
//    the HTML entity "&para;" had swallowed "&para" of "&param". As
//    written the call could not compile and the realtime priority was
//    never installed. (The PulseAudio section has the same corruption.)
//  * A diagnostic string said "RtApiOss::getDeviceInfo:" inside this
//    function; corrected to "RtApiOss::probeDeviceOpen:".
9127 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9128 unsigned int firstChannel, unsigned int sampleRate,
9129 RtAudioFormat format, unsigned int *bufferSize,
9130 RtAudio::StreamOptions *options )
9132 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9133 if ( mixerfd == -1 ) {
9134 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
// Re-verify OSS v4 and the device index (defensive; callers already check).
9138 oss_sysinfo sysinfo;
9139 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9140 if ( result == -1 ) {
9142 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9146 unsigned nDevices = sysinfo.numaudios;
9147 if ( nDevices == 0 ) {
9148 // This should not happen because a check is made before this function is called.
9150 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9154 if ( device >= nDevices ) {
9155 // This should not happen because a check is made before this function is called.
9157 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9161 oss_audioinfo ainfo;
9163 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9165 if ( result == -1 ) {
9166 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") info.";
9167 errorText_ = errorStream_.str();
9171 // Check if device supports input or output
9172 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9173 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9174 if ( mode == OUTPUT )
9175 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9177 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9178 errorText_ = errorStream_.str();
// Choose open flags. For OSS, opening the same device for input after it
// was opened for output requires closing and reopening it O_RDWR (duplex).
9183 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9184 if ( mode == OUTPUT )
9186 else { // mode == INPUT
9187 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9188 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9189 close( handle->id[0] );
9191 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9192 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9193 errorText_ = errorStream_.str();
9196 // Check that the number previously set channels is the same.
9197 if ( stream_.nUserChannels[0] != channels ) {
9198 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9199 errorText_ = errorStream_.str();
9208 // Set exclusive access if specified.
9209 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9211 // Try to open the device.
9213 fd = open( ainfo.devnode, flags, 0 );
9215 if ( errno == EBUSY )
9216 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9218 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9219 errorText_ = errorStream_.str();
// NOTE(review): `flags | O_RDWR` is always true (bitwise-or, not a test);
// in the canonical RtAudio source this SETDUPLEX block is commented out,
// and the line that would open/close that comment is elided here, so the
// expression is left untouched.
9223 // For duplex operation, specifically set this mode (this doesn't seem to work).
9225 if ( flags | O_RDWR ) {
9226 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9227 if ( result == -1) {
9228 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9229 errorText_ = errorStream_.str();
9235 // Check the device channel support.
9236 stream_.nUserChannels[mode] = channels;
9237 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9239 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9240 errorText_ = errorStream_.str();
9244 // Set the number of channels.
9245 int deviceChannels = channels + firstChannel;
9246 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9247 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9249 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9250 errorText_ = errorStream_.str();
9253 stream_.nDeviceChannels[mode] = deviceChannels;
9255 // Get the data format mask
9257 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9258 if ( result == -1 ) {
9260 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9261 errorText_ = errorStream_.str();
// Pick the device format: prefer the user's requested format in native
// endianness (_NE), fall back to the opposite endianness (_OE) with byte
// swapping enabled in software.
9265 // Determine how to set the device format.
9266 stream_.userFormat = format;
9267 int deviceFormat = -1;
9268 stream_.doByteSwap[mode] = false;
9269 if ( format == RTAUDIO_SINT8 ) {
9270 if ( mask & AFMT_S8 ) {
9271 deviceFormat = AFMT_S8;
9272 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9275 else if ( format == RTAUDIO_SINT16 ) {
9276 if ( mask & AFMT_S16_NE ) {
9277 deviceFormat = AFMT_S16_NE;
9278 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9280 else if ( mask & AFMT_S16_OE ) {
9281 deviceFormat = AFMT_S16_OE;
9282 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9283 stream_.doByteSwap[mode] = true;
9286 else if ( format == RTAUDIO_SINT24 ) {
9287 if ( mask & AFMT_S24_NE ) {
9288 deviceFormat = AFMT_S24_NE;
9289 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9291 else if ( mask & AFMT_S24_OE ) {
9292 deviceFormat = AFMT_S24_OE;
9293 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9294 stream_.doByteSwap[mode] = true;
9297 else if ( format == RTAUDIO_SINT32 ) {
9298 if ( mask & AFMT_S32_NE ) {
9299 deviceFormat = AFMT_S32_NE;
9300 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9302 else if ( mask & AFMT_S32_OE ) {
9303 deviceFormat = AFMT_S32_OE;
9304 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9305 stream_.doByteSwap[mode] = true;
// Requested format unavailable: fall back through the format ladder,
// RtAudio will convert between the user and device formats.
9309 if ( deviceFormat == -1 ) {
9310 // The user requested format is not natively supported by the device.
9311 if ( mask & AFMT_S16_NE ) {
9312 deviceFormat = AFMT_S16_NE;
9313 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9315 else if ( mask & AFMT_S32_NE ) {
9316 deviceFormat = AFMT_S32_NE;
9317 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9319 else if ( mask & AFMT_S24_NE ) {
9320 deviceFormat = AFMT_S24_NE;
9321 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9323 else if ( mask & AFMT_S16_OE ) {
9324 deviceFormat = AFMT_S16_OE;
9325 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9326 stream_.doByteSwap[mode] = true;
9328 else if ( mask & AFMT_S32_OE ) {
9329 deviceFormat = AFMT_S32_OE;
9330 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9331 stream_.doByteSwap[mode] = true;
9333 else if ( mask & AFMT_S24_OE ) {
9334 deviceFormat = AFMT_S24_OE;
9335 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9336 stream_.doByteSwap[mode] = true;
9338 else if ( mask & AFMT_S8) {
9339 deviceFormat = AFMT_S8;
9340 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9344 if ( stream_.deviceFormat[mode] == 0 ) {
9345 // This really shouldn't happen ...
9347 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9348 errorText_ = errorStream_.str();
9352 // Set the data format.
9353 int temp = deviceFormat;
9354 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9355 if ( result == -1 || deviceFormat != temp ) {
9357 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9358 errorText_ = errorStream_.str();
9362 // Attempt to set the buffer size. According to OSS, the minimum
9363 // number of buffers is two. The supposed minimum buffer size is 16
9364 // bytes, so that will be our lower bound. The argument to this
9365 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9366 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9367 // We'll check the actual value used near the end of the setup
9369 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9370 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9372 if ( options ) buffers = options->numberOfBuffers;
9373 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9374 if ( buffers < 2 ) buffers = 3;
9375 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9376 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9377 if ( result == -1 ) {
9379 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9380 errorText_ = errorStream_.str();
9383 stream_.nBuffers = buffers;
9385 // Save buffer size (in sample frames).
9386 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9387 stream_.bufferSize = *bufferSize;
9389 // Set the sample rate.
9390 int srate = sampleRate;
9391 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9392 if ( result == -1 ) {
9394 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9395 errorText_ = errorStream_.str();
// Accept the driver's rate if it is within 100 Hz of the request.
9399 // Verify the sample rate setup worked.
9400 if ( abs( srate - (int)sampleRate ) > 100 ) {
9402 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9403 errorText_ = errorStream_.str();
9406 stream_.sampleRate = sampleRate;
9408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9409 // We're doing duplex setup here.
9410 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9411 stream_.nDeviceChannels[0] = deviceChannels;
9414 // Set interleaving parameters.
9415 stream_.userInterleaved = true;
9416 stream_.deviceInterleaved[mode] = true;
9417 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9418 stream_.userInterleaved = false;
9420 // Set flags for buffer conversion
9421 stream_.doConvertBuffer[mode] = false;
9422 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9423 stream_.doConvertBuffer[mode] = true;
9424 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9425 stream_.doConvertBuffer[mode] = true;
9426 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9427 stream_.nUserChannels[mode] > 1 )
9428 stream_.doConvertBuffer[mode] = true;
9430 // Allocate the stream handles if necessary and then save.
9431 if ( stream_.apiHandle == 0 ) {
9433 handle = new OssHandle;
9435 catch ( std::bad_alloc& ) {
9436 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9440 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9441 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9445 stream_.apiHandle = (void *) handle;
9448 handle = (OssHandle *) stream_.apiHandle;
9450 handle->id[mode] = fd;
9452 // Allocate necessary internal buffers.
9453 unsigned long bufferBytes;
9454 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9455 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9456 if ( stream_.userBuffer[mode] == NULL ) {
9457 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// The device buffer is shared between directions in duplex mode; reuse the
// existing one when it is already large enough for this direction.
9461 if ( stream_.doConvertBuffer[mode] ) {
9463 bool makeBuffer = true;
9464 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9465 if ( mode == INPUT ) {
9466 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9467 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9468 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9473 bufferBytes *= *bufferSize;
9474 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9475 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9476 if ( stream_.deviceBuffer == NULL ) {
9477 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9483 stream_.device[mode] = device;
9484 stream_.state = STREAM_STOPPED;
9486 // Setup the buffer conversion information structure.
9487 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9489 // Setup thread if necessary.
9490 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9491 // We had already set up an output stream.
9492 stream_.mode = DUPLEX;
9493 if ( stream_.device[0] == device ) handle->id[0] = fd;
9496 stream_.mode = mode;
9498 // Setup callback thread.
9499 stream_.callbackInfo.object = (void *) this;
9501 // Set the thread attributes for joinable and realtime scheduling
9502 // priority. The higher priority will only take affect if the
9503 // program is run as root or suid.
9504 pthread_attr_t attr;
9505 pthread_attr_init( &attr );
9506 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9507 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9508 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9509 stream_.callbackInfo.doRealtime = true;
9510 struct sched_param param;
9511 int priority = options->priority;
9512 int min = sched_get_priority_min( SCHED_RR );
9513 int max = sched_get_priority_max( SCHED_RR );
9514 if ( priority < min ) priority = min;
9515 else if ( priority > max ) priority = max;
9516 param.sched_priority = priority;
9518 // Set the policy BEFORE the priority. Otherwise it fails.
9519 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9520 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9521 // This is definitely required. Otherwise it fails.
9522 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9523 pthread_attr_setschedparam(&attr, &param);
9526 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9528 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9531 stream_.callbackInfo.isRunning = true;
9532 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9533 pthread_attr_destroy( &attr );
9535 // Failed. Try instead with default attributes.
9536 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9538 stream_.callbackInfo.isRunning = false;
9539 errorText_ = "RtApiOss::error creating callback thread!";
// Shared failure cleanup (the `error:` label line is elided here): tear
// down the handle, close any open fds, and free all buffers.
9549 pthread_cond_destroy( &handle->runnable );
9550 if ( handle->id[0] ) close( handle->id[0] );
9551 if ( handle->id[1] ) close( handle->id[1] );
9553 stream_.apiHandle = 0;
9556 for ( int i=0; i<2; i++ ) {
9557 if ( stream_.userBuffer[i] ) {
9558 free( stream_.userBuffer[i] );
9559 stream_.userBuffer[i] = 0;
9563 if ( stream_.deviceBuffer ) {
9564 free( stream_.deviceBuffer );
9565 stream_.deviceBuffer = 0;
9568 stream_.state = STREAM_CLOSED;
// Close an open stream: stop the callback thread (waking it if parked on
// the condition variable), halt any in-progress device i/o, close the
// device fds, free the handle and all buffers, and reset the stream state.
// Warns and returns if no stream is open.
9572 void RtApiOss :: closeStream()
9574 if ( stream_.state == STREAM_CLOSED ) {
9575 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9576 error( RtAudioError::WARNING );
// Signal the parked callback thread (it waits on `runnable` while
// STREAM_STOPPED) so pthread_join below cannot deadlock.
9580 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9581 stream_.callbackInfo.isRunning = false;
9582 MUTEX_LOCK( &stream_.mutex );
9583 if ( stream_.state == STREAM_STOPPED )
9584 pthread_cond_signal( &handle->runnable );
9585 MUTEX_UNLOCK( &stream_.mutex );
9586 pthread_join( stream_.callbackInfo.thread, NULL );
// Halt any direction that is still running before closing the fds.
9588 if ( stream_.state == STREAM_RUNNING ) {
9589 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9590 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9592 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9593 stream_.state = STREAM_STOPPED;
// Release the OSS handle and its condition variable (the `if (handle)`
// guard and `delete handle` lines are elided in this excerpt).
9597 pthread_cond_destroy( &handle->runnable );
9598 if ( handle->id[0] ) close( handle->id[0] );
9599 if ( handle->id[1] ) close( handle->id[1] );
9601 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
9604 for ( int i=0; i<2; i++ ) {
9605 if ( stream_.userBuffer[i] ) {
9606 free( stream_.userBuffer[i] );
9607 stream_.userBuffer[i] = 0;
9611 if ( stream_.deviceBuffer ) {
9612 free( stream_.deviceBuffer );
9613 stream_.deviceBuffer = 0;
9616 stream_.mode = UNINITIALIZED;
9617 stream_.state = STREAM_CLOSED;
// Start a stopped stream. OSS begins transferring as soon as samples are
// read/written, so this only flips the state to RUNNING and wakes the
// callback thread parked on the handle's condition variable.
9620 void RtApiOss :: startStream()
9623 if ( stream_.state == STREAM_RUNNING ) {
9624 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9625 error( RtAudioError::WARNING );
9629 MUTEX_LOCK( &stream_.mutex );
// Reset the stream-time reference so getStreamTime() starts from "now".
9631 #if defined( HAVE_GETTIMEOFDAY )
9632 gettimeofday( &stream_.lastTickTimestamp, NULL );
9635 stream_.state = STREAM_RUNNING;
9637 // No need to do anything else here ... OSS automatically starts
9638 // when fed samples.
9640 MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread blocked in callbackEvent()'s cond_wait.
9642 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9643 pthread_cond_signal( &handle->runnable );
// Stop a running stream gracefully: drain the output by writing one
// buffer-cycle of silence, halt both device directions, and mark the
// stream STOPPED. Raises SYSTEM_ERROR if any halt ioctl failed.
9646 void RtApiOss :: stopStream()
9649 if ( stream_.state == STREAM_STOPPED ) {
9650 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9651 error( RtAudioError::WARNING );
9655 MUTEX_LOCK( &stream_.mutex );
9657 // The state might change while waiting on a mutex.
9658 if ( stream_.state == STREAM_STOPPED ) {
9659 MUTEX_UNLOCK( &stream_.mutex );
9664 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9665 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9667 // Flush the output with zeros a few times.
// Pick whichever buffer actually feeds the device: the device buffer
// when format/channel conversion is active, the user buffer otherwise.
9670 RtAudioFormat format;
9672 if ( stream_.doConvertBuffer[0] ) {
9673 buffer = stream_.deviceBuffer;
9674 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9675 format = stream_.deviceFormat[0];
9678 buffer = stream_.userBuffer[0];
9679 samples = stream_.bufferSize * stream_.nUserChannels[0];
9680 format = stream_.userFormat;
// Write nBuffers+1 periods of silence so the device drains cleanly
// instead of looping the last audible fragment.
9683 memset( buffer, 0, samples * formatBytes(format) );
9684 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9685 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9686 if ( result == -1 ) {
9687 errorText_ = "RtApiOss::stopStream: audio write error.";
9688 error( RtAudioError::WARNING );
9692 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9693 if ( result == -1 ) {
9694 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9695 errorText_ = errorStream_.str();
9698 handle->triggered = false;
// Halt the input side too, unless duplex on a single shared fd (already
// halted above).
9701 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9702 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9703 if ( result == -1 ) {
9704 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9705 errorText_ = errorStream_.str();
9711 stream_.state = STREAM_STOPPED;
9712 MUTEX_UNLOCK( &stream_.mutex );
9714 if ( result != -1 ) return;
9715 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream immediately: like stopStream() but without
// draining the output with silence first. Raises SYSTEM_ERROR if a halt
// ioctl failed.
9718 void RtApiOss :: abortStream()
9721 if ( stream_.state == STREAM_STOPPED ) {
9722 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9723 error( RtAudioError::WARNING );
9727 MUTEX_LOCK( &stream_.mutex );
9729 // The state might change while waiting on a mutex.
9730 if ( stream_.state == STREAM_STOPPED ) {
9731 MUTEX_UNLOCK( &stream_.mutex );
9736 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9737 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9738 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9739 if ( result == -1 ) {
9740 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9741 errorText_ = errorStream_.str();
9744 handle->triggered = false;
// Halt the input side too, unless duplex on a single shared fd.
9747 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9748 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9749 if ( result == -1 ) {
9750 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9751 errorText_ = errorStream_.str();
9757 stream_.state = STREAM_STOPPED;
9758 MUTEX_UNLOCK( &stream_.mutex );
9760 if ( result != -1 ) return;
9761 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the OSS callback loop (driven by ossCallbackHandler):
// park while stopped, invoke the user callback with xrun status, then do
// the blocking write()/read() to/from the device with any needed format
// conversion and byte swapping.
9764 void RtApiOss :: callbackEvent()
9766 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// While STOPPED, sleep on the condition variable until startStream() (or
// closeStream()) signals; bail out if we were not restarted.
9767 if ( stream_.state == STREAM_STOPPED ) {
9768 MUTEX_LOCK( &stream_.mutex );
9769 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9770 if ( stream_.state != STREAM_RUNNING ) {
9771 MUTEX_UNLOCK( &stream_.mutex );
9774 MUTEX_UNLOCK( &stream_.mutex );
9777 if ( stream_.state == STREAM_CLOSED ) {
9778 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9779 error( RtAudioError::WARNING );
9783 // Invoke user callback to get fresh output data.
9784 int doStopStream = 0;
9785 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9786 double streamTime = getStreamTime();
// Report and clear any under/overflow flags set by earlier i/o errors.
9787 RtAudioStreamStatus status = 0;
9788 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9789 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9790 handle->xrun[0] = false;
9792 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9793 status |= RTAUDIO_INPUT_OVERFLOW;
9794 handle->xrun[1] = false;
9796 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9797 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A return value of 2 requests an immediate abort (no drain).
9798 if ( doStopStream == 2 ) {
9799 this->abortStream();
9803 MUTEX_LOCK( &stream_.mutex );
9805 // The state might change while waiting on a mutex.
9806 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9811 RtAudioFormat format;
9813 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9815 // Setup parameters and do buffer conversion if necessary.
9816 if ( stream_.doConvertBuffer[0] ) {
9817 buffer = stream_.deviceBuffer;
9818 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9819 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9820 format = stream_.deviceFormat[0];
9823 buffer = stream_.userBuffer[0];
9824 samples = stream_.bufferSize * stream_.nUserChannels[0];
9825 format = stream_.userFormat;
9828 // Do byte swapping if necessary.
9829 if ( stream_.doByteSwap[0] )
9830 byteSwapBuffer( buffer, samples, format );
// First duplex cycle only: prime the device with one write while
// triggering is disabled, then enable input+output simultaneously so
// both directions start in sync.
9832 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9834 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9835 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9836 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9837 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9838 handle->triggered = true;
9841 // Write samples to device.
9842 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9844 if ( result == -1 ) {
9845 // We'll assume this is an underrun, though there isn't a
9846 // specific means for determining that.
9847 handle->xrun[0] = true;
9848 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9849 error( RtAudioError::WARNING );
9850 // Continue on to input section.
9854 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9856 // Setup parameters.
9857 if ( stream_.doConvertBuffer[1] ) {
9858 buffer = stream_.deviceBuffer;
9859 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9860 format = stream_.deviceFormat[1];
9863 buffer = stream_.userBuffer[1];
9864 samples = stream_.bufferSize * stream_.nUserChannels[1];
9865 format = stream_.userFormat;
9868 // Read samples from device.
9869 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9871 if ( result == -1 ) {
9872 // We'll assume this is an overrun, though there isn't a
9873 // specific means for determining that.
9874 handle->xrun[1] = true;
9875 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9876 error( RtAudioError::WARNING );
9880 // Do byte swapping if necessary.
9881 if ( stream_.doByteSwap[1] )
9882 byteSwapBuffer( buffer, samples, format );
9884 // Do buffer conversion if necessary.
9885 if ( stream_.doConvertBuffer[1] )
9886 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// (The `unlock:` label targeted by the goto above is elided here.)
9890 MUTEX_UNLOCK( &stream_.mutex );
9892 RtApi::tickStreamTime();
// A callback return value of 1 requests a graceful stop (with drain).
9893 if ( doStopStream == 1 ) this->stopStream();
// Entry point of the OSS callback thread created in probeDeviceOpen().
// Loops calling RtApiOss::callbackEvent() until closeStream() clears the
// isRunning flag, then exits.
9896 static void *ossCallbackHandler( void *ptr )
9898 CallbackInfo *info = (CallbackInfo *) ptr;
9899 RtApiOss *object = (RtApiOss *) info->object;
9900 bool *isRunning = &info->isRunning;
// Diagnostic: report whether the realtime (SCHED_RR) policy requested at
// open time actually took effect for this thread.
9902 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9903 if (info->doRealtime) {
9904 std::cerr << "RtAudio oss: " <<
9905 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9906 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
9910 while ( *isRunning == true ) {
9911 pthread_testcancel();
9912 object->callbackEvent();
9915 pthread_exit( NULL );
9918 //******************** End of __LINUX_OSS__ *********************//
9922 // *************************************************** //
9924 // Protected common (OS-independent) RtAudio methods.
9926 // *************************************************** //
9928 // This method can be modified to control the behavior of error
9929 // message printing.
// Central error dispatcher for all APIs. If the user registered an error
// callback, deliver errorText_ there (suppressing nested errors generated
// during cleanup and stopping the callback thread on non-warning errors);
// otherwise print warnings to stderr (when enabled) and throw
// RtAudioError for anything more severe.
9930 void RtApi :: error( RtAudioError::Type type )
9932 errorStream_.str(""); // clear the ostringstream
9934 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9935 if ( errorCallback ) {
9936 // abortStream() can generate new error messages. Ignore them. Just keep original one.
// Re-entrancy guard: the early return when a first error is already in
// flight is elided from this excerpt.
9938 if ( firstErrorOccurred_ )
9941 firstErrorOccurred_ = true;
// Copy the message: errorText_ may be overwritten by the abort below.
9942 const std::string errorMessage = errorText_;
9944 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9945 stream_.callbackInfo.isRunning = false; // exit from the thread
9949 errorCallback( type, errorMessage );
9950 firstErrorOccurred_ = false;
9954 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9955 std::cerr << '\n' << errorText_ << "\n\n";
9956 else if ( type != RtAudioError::WARNING )
9957 throw( RtAudioError( errorText_, type ) );
// Guard used by public stream methods: raise INVALID_USE (which throws
// when no error callback is installed) if no stream is currently open.
9960 void RtApi :: verifyStream()
9962 if ( stream_.state == STREAM_CLOSED ) {
9963 errorText_ = "RtApi:: a stream is not open!";
9964 error( RtAudioError::INVALID_USE );
9968 void RtApi :: clearStreamInfo()
9970 stream_.mode = UNINITIALIZED;
9971 stream_.state = STREAM_CLOSED;
9972 stream_.sampleRate = 0;
9973 stream_.bufferSize = 0;
9974 stream_.nBuffers = 0;
9975 stream_.userFormat = 0;
9976 stream_.userInterleaved = true;
9977 stream_.streamTime = 0.0;
9978 stream_.apiHandle = 0;
9979 stream_.deviceBuffer = 0;
9980 stream_.callbackInfo.callback = 0;
9981 stream_.callbackInfo.userData = 0;
9982 stream_.callbackInfo.isRunning = false;
9983 stream_.callbackInfo.errorCallback = 0;
9984 for ( int i=0; i<2; i++ ) {
9985 stream_.device[i] = 11111;
9986 stream_.doConvertBuffer[i] = false;
9987 stream_.deviceInterleaved[i] = true;
9988 stream_.doByteSwap[i] = false;
9989 stream_.nUserChannels[i] = 0;
9990 stream_.nDeviceChannels[i] = 0;
9991 stream_.channelOffset[i] = 0;
9992 stream_.deviceFormat[i] = 0;
9993 stream_.latency[i] = 0;
9994 stream_.userBuffer[i] = 0;
9995 stream_.convertInfo[i].channels = 0;
9996 stream_.convertInfo[i].inJump = 0;
9997 stream_.convertInfo[i].outJump = 0;
9998 stream_.convertInfo[i].inFormat = 0;
9999 stream_.convertInfo[i].outFormat = 0;
10000 stream_.convertInfo[i].inOffset.clear();
10001 stream_.convertInfo[i].outOffset.clear();
10005 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10007 if ( format == RTAUDIO_SINT16 )
10009 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10011 else if ( format == RTAUDIO_FLOAT64 )
10013 else if ( format == RTAUDIO_SINT24 )
10015 else if ( format == RTAUDIO_SINT8 )
10018 errorText_ = "RtApi::formatBytes: undefined format.";
10019 error( RtAudioError::WARNING );
10024 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10026 if ( mode == INPUT ) { // convert device to user buffer
10027 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10028 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10029 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10030 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10032 else { // convert user to device buffer
10033 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10034 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10035 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10036 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10039 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10040 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10042 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10044 // Set up the interleave/deinterleave offsets.
10045 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10046 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10047 ( mode == INPUT && stream_.userInterleaved ) ) {
10048 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10049 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10050 stream_.convertInfo[mode].outOffset.push_back( k );
10051 stream_.convertInfo[mode].inJump = 1;
10055 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10056 stream_.convertInfo[mode].inOffset.push_back( k );
10057 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10058 stream_.convertInfo[mode].outJump = 1;
10062 else { // no (de)interleaving
10063 if ( stream_.userInterleaved ) {
10064 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10065 stream_.convertInfo[mode].inOffset.push_back( k );
10066 stream_.convertInfo[mode].outOffset.push_back( k );
10070 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10071 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10072 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10073 stream_.convertInfo[mode].inJump = 1;
10074 stream_.convertInfo[mode].outJump = 1;
10079 // Add channel offset.
10080 if ( firstChannel > 0 ) {
10081 if ( stream_.deviceInterleaved[mode] ) {
10082 if ( mode == OUTPUT ) {
10083 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10084 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10087 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10088 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10092 if ( mode == OUTPUT ) {
10093 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10094 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10097 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10098 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10104 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10106 // This function does format conversion, input/output channel compensation, and
10107 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10108 // the lower three bytes of a 32-bit integer.
10110 // Clear our device buffer when in/out duplex device channels are different
10111 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10112 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10113 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10116 if (info.outFormat == RTAUDIO_FLOAT64) {
10118 Float64 *out = (Float64 *)outBuffer;
10120 if (info.inFormat == RTAUDIO_SINT8) {
10121 signed char *in = (signed char *)inBuffer;
10122 scale = 1.0 / 127.5;
10123 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10124 for (j=0; j<info.channels; j++) {
10125 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10126 out[info.outOffset[j]] += 0.5;
10127 out[info.outOffset[j]] *= scale;
10130 out += info.outJump;
10133 else if (info.inFormat == RTAUDIO_SINT16) {
10134 Int16 *in = (Int16 *)inBuffer;
10135 scale = 1.0 / 32767.5;
10136 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10137 for (j=0; j<info.channels; j++) {
10138 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10139 out[info.outOffset[j]] += 0.5;
10140 out[info.outOffset[j]] *= scale;
10143 out += info.outJump;
10146 else if (info.inFormat == RTAUDIO_SINT24) {
10147 Int24 *in = (Int24 *)inBuffer;
10148 scale = 1.0 / 8388607.5;
10149 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10150 for (j=0; j<info.channels; j++) {
10151 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10152 out[info.outOffset[j]] += 0.5;
10153 out[info.outOffset[j]] *= scale;
10156 out += info.outJump;
10159 else if (info.inFormat == RTAUDIO_SINT32) {
10160 Int32 *in = (Int32 *)inBuffer;
10161 scale = 1.0 / 2147483647.5;
10162 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10163 for (j=0; j<info.channels; j++) {
10164 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10165 out[info.outOffset[j]] += 0.5;
10166 out[info.outOffset[j]] *= scale;
10169 out += info.outJump;
10172 else if (info.inFormat == RTAUDIO_FLOAT32) {
10173 Float32 *in = (Float32 *)inBuffer;
10174 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10175 for (j=0; j<info.channels; j++) {
10176 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10179 out += info.outJump;
10182 else if (info.inFormat == RTAUDIO_FLOAT64) {
10183 // Channel compensation and/or (de)interleaving only.
10184 Float64 *in = (Float64 *)inBuffer;
10185 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10186 for (j=0; j<info.channels; j++) {
10187 out[info.outOffset[j]] = in[info.inOffset[j]];
10190 out += info.outJump;
10194 else if (info.outFormat == RTAUDIO_FLOAT32) {
10196 Float32 *out = (Float32 *)outBuffer;
10198 if (info.inFormat == RTAUDIO_SINT8) {
10199 signed char *in = (signed char *)inBuffer;
10200 scale = (Float32) ( 1.0 / 127.5 );
10201 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10202 for (j=0; j<info.channels; j++) {
10203 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10204 out[info.outOffset[j]] += 0.5;
10205 out[info.outOffset[j]] *= scale;
10208 out += info.outJump;
10211 else if (info.inFormat == RTAUDIO_SINT16) {
10212 Int16 *in = (Int16 *)inBuffer;
10213 scale = (Float32) ( 1.0 / 32767.5 );
10214 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10215 for (j=0; j<info.channels; j++) {
10216 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10217 out[info.outOffset[j]] += 0.5;
10218 out[info.outOffset[j]] *= scale;
10221 out += info.outJump;
10224 else if (info.inFormat == RTAUDIO_SINT24) {
10225 Int24 *in = (Int24 *)inBuffer;
10226 scale = (Float32) ( 1.0 / 8388607.5 );
10227 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10228 for (j=0; j<info.channels; j++) {
10229 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10230 out[info.outOffset[j]] += 0.5;
10231 out[info.outOffset[j]] *= scale;
10234 out += info.outJump;
10237 else if (info.inFormat == RTAUDIO_SINT32) {
10238 Int32 *in = (Int32 *)inBuffer;
10239 scale = (Float32) ( 1.0 / 2147483647.5 );
10240 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10241 for (j=0; j<info.channels; j++) {
10242 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10243 out[info.outOffset[j]] += 0.5;
10244 out[info.outOffset[j]] *= scale;
10247 out += info.outJump;
10250 else if (info.inFormat == RTAUDIO_FLOAT32) {
10251 // Channel compensation and/or (de)interleaving only.
10252 Float32 *in = (Float32 *)inBuffer;
10253 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10254 for (j=0; j<info.channels; j++) {
10255 out[info.outOffset[j]] = in[info.inOffset[j]];
10258 out += info.outJump;
10261 else if (info.inFormat == RTAUDIO_FLOAT64) {
10262 Float64 *in = (Float64 *)inBuffer;
10263 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10264 for (j=0; j<info.channels; j++) {
10265 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10268 out += info.outJump;
10272 else if (info.outFormat == RTAUDIO_SINT32) {
10273 Int32 *out = (Int32 *)outBuffer;
10274 if (info.inFormat == RTAUDIO_SINT8) {
10275 signed char *in = (signed char *)inBuffer;
10276 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10277 for (j=0; j<info.channels; j++) {
10278 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10279 out[info.outOffset[j]] <<= 24;
10282 out += info.outJump;
10285 else if (info.inFormat == RTAUDIO_SINT16) {
10286 Int16 *in = (Int16 *)inBuffer;
10287 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10288 for (j=0; j<info.channels; j++) {
10289 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10290 out[info.outOffset[j]] <<= 16;
10293 out += info.outJump;
10296 else if (info.inFormat == RTAUDIO_SINT24) {
10297 Int24 *in = (Int24 *)inBuffer;
10298 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10299 for (j=0; j<info.channels; j++) {
10300 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10301 out[info.outOffset[j]] <<= 8;
10304 out += info.outJump;
10307 else if (info.inFormat == RTAUDIO_SINT32) {
10308 // Channel compensation and/or (de)interleaving only.
10309 Int32 *in = (Int32 *)inBuffer;
10310 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10311 for (j=0; j<info.channels; j++) {
10312 out[info.outOffset[j]] = in[info.inOffset[j]];
10315 out += info.outJump;
10318 else if (info.inFormat == RTAUDIO_FLOAT32) {
10319 Float32 *in = (Float32 *)inBuffer;
10320 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10321 for (j=0; j<info.channels; j++) {
10322 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10325 out += info.outJump;
10328 else if (info.inFormat == RTAUDIO_FLOAT64) {
10329 Float64 *in = (Float64 *)inBuffer;
10330 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10331 for (j=0; j<info.channels; j++) {
10332 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10335 out += info.outJump;
10339 else if (info.outFormat == RTAUDIO_SINT24) {
10340 Int24 *out = (Int24 *)outBuffer;
10341 if (info.inFormat == RTAUDIO_SINT8) {
10342 signed char *in = (signed char *)inBuffer;
10343 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10344 for (j=0; j<info.channels; j++) {
10345 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10346 //out[info.outOffset[j]] <<= 16;
10349 out += info.outJump;
10352 else if (info.inFormat == RTAUDIO_SINT16) {
10353 Int16 *in = (Int16 *)inBuffer;
10354 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10355 for (j=0; j<info.channels; j++) {
10356 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10357 //out[info.outOffset[j]] <<= 8;
10360 out += info.outJump;
10363 else if (info.inFormat == RTAUDIO_SINT24) {
10364 // Channel compensation and/or (de)interleaving only.
10365 Int24 *in = (Int24 *)inBuffer;
10366 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10367 for (j=0; j<info.channels; j++) {
10368 out[info.outOffset[j]] = in[info.inOffset[j]];
10371 out += info.outJump;
10374 else if (info.inFormat == RTAUDIO_SINT32) {
10375 Int32 *in = (Int32 *)inBuffer;
10376 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10377 for (j=0; j<info.channels; j++) {
10378 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10379 //out[info.outOffset[j]] >>= 8;
10382 out += info.outJump;
10385 else if (info.inFormat == RTAUDIO_FLOAT32) {
10386 Float32 *in = (Float32 *)inBuffer;
10387 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10388 for (j=0; j<info.channels; j++) {
10389 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10392 out += info.outJump;
10395 else if (info.inFormat == RTAUDIO_FLOAT64) {
10396 Float64 *in = (Float64 *)inBuffer;
10397 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10398 for (j=0; j<info.channels; j++) {
10399 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10402 out += info.outJump;
10406 else if (info.outFormat == RTAUDIO_SINT16) {
10407 Int16 *out = (Int16 *)outBuffer;
10408 if (info.inFormat == RTAUDIO_SINT8) {
10409 signed char *in = (signed char *)inBuffer;
10410 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10411 for (j=0; j<info.channels; j++) {
10412 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10413 out[info.outOffset[j]] <<= 8;
10416 out += info.outJump;
10419 else if (info.inFormat == RTAUDIO_SINT16) {
10420 // Channel compensation and/or (de)interleaving only.
10421 Int16 *in = (Int16 *)inBuffer;
10422 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10423 for (j=0; j<info.channels; j++) {
10424 out[info.outOffset[j]] = in[info.inOffset[j]];
10427 out += info.outJump;
10430 else if (info.inFormat == RTAUDIO_SINT24) {
10431 Int24 *in = (Int24 *)inBuffer;
10432 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10433 for (j=0; j<info.channels; j++) {
10434 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10437 out += info.outJump;
10440 else if (info.inFormat == RTAUDIO_SINT32) {
10441 Int32 *in = (Int32 *)inBuffer;
10442 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10443 for (j=0; j<info.channels; j++) {
10444 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10447 out += info.outJump;
10450 else if (info.inFormat == RTAUDIO_FLOAT32) {
10451 Float32 *in = (Float32 *)inBuffer;
10452 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10453 for (j=0; j<info.channels; j++) {
10454 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10457 out += info.outJump;
10460 else if (info.inFormat == RTAUDIO_FLOAT64) {
10461 Float64 *in = (Float64 *)inBuffer;
10462 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10463 for (j=0; j<info.channels; j++) {
10464 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10467 out += info.outJump;
10471 else if (info.outFormat == RTAUDIO_SINT8) {
10472 signed char *out = (signed char *)outBuffer;
10473 if (info.inFormat == RTAUDIO_SINT8) {
10474 // Channel compensation and/or (de)interleaving only.
10475 signed char *in = (signed char *)inBuffer;
10476 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10477 for (j=0; j<info.channels; j++) {
10478 out[info.outOffset[j]] = in[info.inOffset[j]];
10481 out += info.outJump;
10484 if (info.inFormat == RTAUDIO_SINT16) {
10485 Int16 *in = (Int16 *)inBuffer;
10486 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10487 for (j=0; j<info.channels; j++) {
10488 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10491 out += info.outJump;
10494 else if (info.inFormat == RTAUDIO_SINT24) {
10495 Int24 *in = (Int24 *)inBuffer;
10496 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10497 for (j=0; j<info.channels; j++) {
10498 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10501 out += info.outJump;
10504 else if (info.inFormat == RTAUDIO_SINT32) {
10505 Int32 *in = (Int32 *)inBuffer;
10506 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10507 for (j=0; j<info.channels; j++) {
10508 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10511 out += info.outJump;
10514 else if (info.inFormat == RTAUDIO_FLOAT32) {
10515 Float32 *in = (Float32 *)inBuffer;
10516 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10517 for (j=0; j<info.channels; j++) {
10518 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10521 out += info.outJump;
10524 else if (info.inFormat == RTAUDIO_FLOAT64) {
10525 Float64 *in = (Float64 *)inBuffer;
10526 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10527 for (j=0; j<info.channels; j++) {
10528 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10531 out += info.outJump;
10537 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10538 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10539 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10541 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10547 if ( format == RTAUDIO_SINT16 ) {
10548 for ( unsigned int i=0; i<samples; i++ ) {
10549 // Swap 1st and 2nd bytes.
10554 // Increment 2 bytes.
10558 else if ( format == RTAUDIO_SINT32 ||
10559 format == RTAUDIO_FLOAT32 ) {
10560 for ( unsigned int i=0; i<samples; i++ ) {
10561 // Swap 1st and 4th bytes.
10566 // Swap 2nd and 3rd bytes.
10572 // Increment 3 more bytes.
10576 else if ( format == RTAUDIO_SINT24 ) {
10577 for ( unsigned int i=0; i<samples; i++ ) {
10578 // Swap 1st and 3rd bytes.
10583 // Increment 2 more bytes.
10587 else if ( format == RTAUDIO_FLOAT64 ) {
10588 for ( unsigned int i=0; i<samples; i++ ) {
10589 // Swap 1st and 8th bytes
10594 // Swap 2nd and 7th bytes
10600 // Swap 3rd and 6th bytes
10606 // Swap 4th and 5th bytes
10612 // Increment 5 more bytes.
10618 // Indentation settings for Vim and Emacs
10620 // Local Variables:
10621 // c-basic-offset: 2
10622 // indent-tabs-mode: nil
10625 // vim: et sts=2 sw=2