1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
// Static variable definitions.
// Canonical table of sample rates the API backends probe; MAX_SAMPLE_RATES
// must equal the number of entries below.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform-dependent mutex wrappers for the stream mutex.  On Windows
// builds (DirectSound, ASIO, WASAPI) they map onto a CRITICAL_SECTION.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Narrow-string overload: a trivial pass-through copy into std::string,
// provided so callers can convert either character width uniformly
// (see the wide-character overload below).
static std::string convertCharPointerToStdString( const char *text )
{
  std::string converted( text );
  return converted;
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
// POSIX builds (ALSA, Pulse, Jack, OSS, CoreAudio) use a pthread mutex.
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// With no audio API compiled in, the mutex macros become harmless no-ops.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum.
// Column 0: short lookup key (used by getApiName / getCompiledApiByName);
// column 1: human-readable label (used by getApiDisplayName).
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
// Row count of the table above; checked against RtAudio::NUM_APIS by the
// StaticAssert machinery below.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
// The order here will control the order of RtAudio's API search in
// the constructor.  The list is terminated by the UNSPECIFIED sentinel,
// which the count below excludes.
extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
#if defined(__UNIX_JACK__)
#if defined(__LINUX_PULSE__)
  RtAudio::LINUX_PULSE,
#if defined(__LINUX_ALSA__)
#if defined(__LINUX_OSS__)
#if defined(__WINDOWS_ASIO__)
  RtAudio::WINDOWS_ASIO,
#if defined(__WINDOWS_WASAPI__)
  RtAudio::WINDOWS_WASAPI,
#if defined(__WINDOWS_DS__)
#if defined(__MACOSX_CORE__)
  RtAudio::MACOSX_CORE,
#if defined(__RTAUDIO_DUMMY__)
  RtAudio::RTAUDIO_DUMMY,
  RtAudio::UNSPECIFIED,
// The -1 drops the UNSPECIFIED sentinel from the advertised count.
extern "C" const unsigned int rtaudio_num_compiled_apis =
  sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
// This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
// If the build breaks here, check that they match.
// Pre-C++11 static_assert emulation: only the <true> specialization exposes a
// public constructor, so instantiating StaticAssert<false> fails to compile.
template<bool b> class StaticAssert { private: StaticAssert() {} };
template<> class StaticAssert<true>{ public: StaticAssert() {} };
class StaticAssertions { StaticAssertions() {
  StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
163 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
166 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
169 std::string RtAudio :: getApiName( RtAudio::Api api )
171 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return rtaudio_api_names[api][0];
176 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return rtaudio_api_names[api][1];
183 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
186 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
187 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
188 return rtaudio_compiled_apis[i];
189 return RtAudio::UNSPECIFIED;
// Instantiate the RtApi subclass for the requested API, provided support
// for it was compiled into this build; otherwise rtapi_ is not assigned
// by any branch below.
void RtAudio :: openRtApi( RtAudio::Api api )
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
// Construct an RtAudio instance.  If a specific API was requested, try it
// first; otherwise (or on failure) probe the compiled APIs in search
// order until one reports at least one device.
RtAudio :: RtAudio( RtAudio::Api api )
  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    if ( rtapi_ ) return;

    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor (body not shown in this view — presumably releases the
// underlying rtapi_ instance; confirm against the full source).
RtAudio :: ~RtAudio()
275 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
276 RtAudio::StreamParameters *inputParameters,
277 RtAudioFormat format, unsigned int sampleRate,
278 unsigned int *bufferFrames,
279 RtAudioCallback callback, void *userData,
280 RtAudio::StreamOptions *options,
281 RtAudioErrorCallback errorCallback )
283 return rtapi_->openStream( outputParameters, inputParameters, format,
284 sampleRate, bufferFrames, callback,
285 userData, options, errorCallback );
288 // *************************************************** //
290 // Public RtApi definitions (see end of file for
291 // private or protected utility functions).
293 // *************************************************** //
  // RtApi constructor body: establish a closed, uninitialized stream
  // with no user buffers, create the stream mutex, and reset the
  // warning/error bookkeeping.
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
  // RtApi destructor body: tear down the stream mutex.
  MUTEX_DESTROY( &stream_.mutex );
// Validate all stream parameters, probe/open the requested device(s) via
// probeDeviceOpen(), and install the user callback.  Failures are
// reported by setting errorText_ and calling error().
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
  // Only one stream per RtApi instance may be open at a time.
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );

  // Clear stream information potentially left from a previously open stream.

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );

  // At least one direction must be requested.
  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );

  // formatBytes() returns 0 for values outside the RtAudioFormat set.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );

  // Range-check the requested device indices.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );

  unsigned int iChannels = 0;
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );

  // Open the output half, if requested.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );

  // Open the input half, if requested.
  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Roll back the already-opened output half before reporting failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );

  // Record the callback information for use by the API-specific machinery.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  // Report back the buffer count actually chosen by the backend.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
// Base-class default input device index; API subclasses override this
// where the backend reports a default device.
unsigned int RtApi :: getDefaultInputDevice( void )
  // Should be implemented in subclasses if possible.
// Base-class default output device index; API subclasses override this
// where the backend reports a default device.
unsigned int RtApi :: getDefaultOutputDevice( void )
  // Should be implemented in subclasses if possible.
// Close the open stream; every concrete API class provides a real
// implementation.
void RtApi :: closeStream( void )
  // MUST be implemented in subclasses!
// Base-class stub for device probing/opening; every concrete API class
// overrides this with its backend-specific implementation.
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
  // MUST be implemented in subclasses!
void RtApi :: tickStreamTime( void )
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  // Advance the running stream time by one buffer's duration in seconds.
  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

  // Remember the wall-clock moment of this tick so getStreamTime() can
  // interpolate between callbacks.
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
// Combined stream latency: output latency plus input latency, depending
// on which direction(s) the stream was opened for.
long RtApi :: getStreamLatency( void )
  long totalLatency = 0;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
    totalLatency = stream_.latency[0];  // output side
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
    totalLatency += stream_.latency[1]; // input side
// Elapsed stream time in seconds since the stream was started.
double RtApi :: getStreamTime( void )
#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.

  // A stopped or never-ticked stream just reports the stored value.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  // Stored tick time plus wall-clock delta (seconds) since the last tick.
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
  // Without gettimeofday() support, the last tick value is the best we have.
  return stream_.streamTime;
// Set the stream time to an arbitrary value (e.g. to re-zero the clock).
void RtApi :: setStreamTime( double time )
  stream_.streamTime = time;
  // Re-anchor the interpolation timestamp used by getStreamTime().
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
// Sample rate of the currently open stream.
unsigned int RtApi :: getStreamSampleRate( void )
 return stream_.sampleRate;
500 // *************************************************** //
502 // OS/API-specific methods.
504 // *************************************************** //
506 #if defined(__MACOSX_CORE__)
508 // The OS X CoreAudio API is designed to use a separate callback
509 // procedure for each of its audio devices. A single RtAudio duplex
510 // stream using two different devices is supported here, though it
511 // cannot be guaranteed to always behave correctly because we cannot
512 // synchronize these two callbacks.
514 // A property listener is installed for over/underrun information.
515 // However, no functionality is currently provided to allow property
516 // listeners to trigger user handlers because it is unclear what could
517 // be done if a critical stream parameter (buffer size, sample rate,
518 // device disconnect) notification arrived. The listeners entail
519 // quite a bit of extra code and most likely, a user program wouldn't
520 // be prepared for the result anyway. However, we do provide a flag
521 // to the client callback function to inform of an over/underrun.
// A structure to hold various information related to the CoreAudio API
// implementation (stored in stream_.apiHandle).
  AudioDeviceID id[2]; // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
  UInt32 iStream[2]; // device stream index (or first if using multiple)
  UInt32 nStreams[2]; // number of streams to use
  pthread_cond_t condition;
  int drainCounter; // Tracks callback counts when draining
  bool internalDrain; // Indicates if stop is initiated from callback or not.
  // Default state: one stream per direction, null device ids, no xruns.
  :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
RtApiCore:: RtApiCore()
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6. If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  // A failure here is non-fatal; report it as a warning only.
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
RtApiCore :: ~RtApiCore()
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
// Number of CoreAudio devices currently present (0 on query failure).
unsigned int RtApiCore :: getDeviceCount( void )
  // Find out how many audio devices there are, if any.
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );

  // The property payload is one AudioDeviceID per device.
  return dataSize / sizeof( AudioDeviceID );
// Index (into this API's device list) of the system default input device.
unsigned int RtApiCore :: getDefaultInputDevice( void )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0; // with 0/1 devices, index 0 is the only answer

  // Fetch the AudioDeviceID of the system default input device.
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );

  // Fetch the full device list and locate that id's index within it.
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  // The default device id was not in the device list.
  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Index (into this API's device list) of the system default output device.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0; // with 0/1 devices, index 0 is the only answer

  // Fetch the AudioDeviceID of the system default output device.
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );

  // Fetch the full device list and locate that id's index within it.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  // The default device id was not in the device list.
  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Build a DeviceInfo record (name, channel counts, supported sample
// rates, native format) for the CoreAudio device at the given index.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;

  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // Map the RtAudio device index onto a CoreAudio AudioDeviceID.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  AudioDeviceID id = deviceList[ device ];

  // Get the device name.  The result is "<manufacturer>: <device name>".
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // Worst-case UTF-8 needs up to 3 bytes per UTF-16 code unit, plus NUL.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );

  // Now append the device's own name.
  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)name, strlen(name) );

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get output channel information: sum channels over all output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // The sample rate reporting mechanism is a bit of a mystery. It
  // seems that it can either return individual rates or a range of
  // rates. I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Prefer the highest discrete rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;

  // Expand a reported range into the canonical SAMPLE_RATES it contains.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Flag default input/output status.
  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc trampoline: forwards the device's I/O buffers to the
// RtApiCore instance stored in the CallbackInfo, translating its boolean
// result into an OSStatus.
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
  CallbackInfo *info = (CallbackInfo *) infoPointer;

  RtApiCore *object = (RtApiCore *) info->object;
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
    return kAudioHardwareUnspecifiedError;

  return kAudioHardwareNoError;
// Property listener: records processor-overload (xrun) notifications in
// the CoreHandle flags so the audio callback can report them to the client.
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
  CoreHandle *handle = (CoreHandle *) handlePointer;
  for ( UInt32 i=0; i<nAddresses; i++ ) {
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
        handle->xrun[1] = true; // input-side overload
        handle->xrun[0] = true; // output-side overload

  return kAudioHardwareNoError;
// Property listener used while waiting for a nominal sample-rate change
// to take effect: reads the device's current rate and stores it through
// the supplied Float64 pointer.
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
// Probe and configure a CoreAudio device for one direction of a stream.
//
// Parameters: device index, stream direction (mode), requested channel
// count and first-channel offset, sample rate, user sample format, a
// pointer to the desired buffer size (updated in place with the value
// actually granted), and optional stream options.  On success the
// relevant stream_ fields are populated and the device IOProc is
// registered; returns SUCCESS/FAILURE per the RtApi convention (error
// paths set errorText_ before bailing out).
940 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
941 unsigned int firstChannel, unsigned int sampleRate,
942 RtAudioFormat format, unsigned int *bufferSize,
943 RtAudio::StreamOptions *options )
946 unsigned int nDevices = getDeviceCount();
947 if ( nDevices == 0 ) {
948 // This should not happen because a check is made before this function is called.
949 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
953 if ( device >= nDevices ) {
954 // This should not happen because a check is made before this function is called.
955 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// NOTE(review): variable-length array is a compiler extension in C++;
// std::vector<AudioDeviceID> would be portable.
959 AudioDeviceID deviceList[ nDevices ];
960 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
961 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
962 kAudioObjectPropertyScopeGlobal,
963 kAudioObjectPropertyElementMaster };
964 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
965 0, NULL, &dataSize, (void *) &deviceList );
966 if ( result != noErr ) {
967 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
971 AudioDeviceID id = deviceList[ device ];
973 // Setup for stream mode.
974 bool isInput = false;
975 if ( mode == INPUT ) {
977 property.mScope = kAudioDevicePropertyScopeInput;
980 property.mScope = kAudioDevicePropertyScopeOutput;
982 // Get the stream "configuration".
983 AudioBufferList	*bufferList = nil;
985 property.mSelector = kAudioDevicePropertyStreamConfiguration;
986 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
987 if ( result != noErr || dataSize == 0 ) {
988 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
989 errorText_ = errorStream_.str();
993 // Allocate the AudioBufferList.
994 bufferList = (AudioBufferList *) malloc( dataSize );
995 if ( bufferList == NULL ) {
996 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1000 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1001 if (result != noErr || dataSize == 0) {
1003 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1004 errorText_ = errorStream_.str();
1008 // Search for one or more streams that contain the desired number of
1009 // channels. CoreAudio devices can have an arbitrary number of
1010 // streams and each stream can have an arbitrary number of channels.
1011 // For each stream, a single buffer of interleaved samples is
1012 // provided. RtAudio prefers the use of one stream of interleaved
1013 // data or multiple consecutive single-channel streams. However, we
1014 // now support multiple consecutive multi-channel streams of
1015 // interleaved data as well.
1016 UInt32 iStream, offsetCounter = firstChannel;
1017 UInt32 nStreams = bufferList->mNumberBuffers;
1018 bool monoMode = false;
1019 bool foundStream = false;
1021 // First check that the device supports the requested number of
1023 UInt32 deviceChannels = 0;
1024 for ( iStream=0; iStream<nStreams; iStream++ )
1025 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1027 if ( deviceChannels < ( channels + firstChannel ) ) {
1029 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1030 errorText_ = errorStream_.str();
1034 // Look for a single stream meeting our needs.
1035 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1036 for ( iStream=0; iStream<nStreams; iStream++ ) {
1037 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1038 if ( streamChannels >= channels + offsetCounter ) {
1039 firstStream = iStream;
1040 channelOffset = offsetCounter;
1044 if ( streamChannels > offsetCounter ) break;
1045 offsetCounter -= streamChannels;
1048 // If we didn't find a single stream above, then we should be able
1049 // to meet the channel specification with multiple streams.
1050 if ( foundStream == false ) {
1052 offsetCounter = firstChannel;
1053 for ( iStream=0; iStream<nStreams; iStream++ ) {
1054 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1055 if ( streamChannels > offsetCounter ) break;
1056 offsetCounter -= streamChannels;
1059 firstStream = iStream;
1060 channelOffset = offsetCounter;
1061 Int32 channelCounter = channels + offsetCounter - streamChannels;
1063 if ( streamChannels > 1 ) monoMode = false;
1064 while ( channelCounter > 0 ) {
1065 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1066 if ( streamChannels > 1 ) monoMode = false;
1067 channelCounter -= streamChannels;
1074 // Determine the buffer size.
1075 AudioValueRange	bufferRange;
1076 dataSize = sizeof( AudioValueRange );
1077 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1078 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1080 if ( result != noErr ) {
1081 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1082 errorText_ = errorStream_.str();
// Clamp the requested buffer size into the device-supported range.
1086 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1087 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1088 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1090 // Set the buffer size. For multiple streams, I'm assuming we only
1091 // need to make this setting for the master channel.
1092 UInt32 theSize = (UInt32) *bufferSize;
1093 dataSize = sizeof( UInt32 );
1094 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1095 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1097 if ( result != noErr ) {
1098 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1099 errorText_ = errorStream_.str();
1103 // If attempting to setup a duplex stream, the bufferSize parameter
1104 // MUST be the same in both directions!
1105 *bufferSize = theSize;
1106 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1107 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1108 errorText_ = errorStream_.str();
1112 stream_.bufferSize = *bufferSize;
1113 stream_.nBuffers = 1;
1115 // Try to set "hog" mode ... it's not clear to me this is working.
1116 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1118 dataSize = sizeof( hog_pid );
1119 property.mSelector = kAudioDevicePropertyHogMode;
1120 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1121 if ( result != noErr ) {
1122 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1123 errorText_ = errorStream_.str();
1127 if ( hog_pid != getpid() ) {
1129 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1130 if ( result != noErr ) {
1131 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1132 errorText_ = errorStream_.str();
1138 // Check and if necessary, change the sample rate for the device.
1139 Float64 nominalRate;
1140 dataSize = sizeof( Float64 );
1141 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1142 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1143 if ( result != noErr ) {
1144 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1145 errorText_ = errorStream_.str();
1149 // Only change the sample rate if off by more than 1 Hz.
1150 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1152 // Set a property listener for the sample rate change
1153 Float64 reportedRate = 0.0;
1154 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1155 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1156 if ( result != noErr ) {
1157 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1158 errorText_ = errorStream_.str();
1162 nominalRate = (Float64) sampleRate;
1163 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1164 if ( result != noErr ) {
1165 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1167 errorText_ = errorStream_.str();
1171 // Now wait until the reported nominal rate is what we just set.
1172 UInt32 microCounter = 0;
1173 while ( reportedRate != nominalRate ) {
1174 microCounter += 5000;
1175 if ( microCounter > 5000000 ) break;
1179 // Remove the property listener.
1180 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1182 if ( microCounter > 5000000 ) {
1183 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1184 errorText_ = errorStream_.str();
1189 // Now set the stream format for all streams. Also, check the
1190 // physical format of the device and change that if necessary.
1191 AudioStreamBasicDescription	description;
1192 dataSize = sizeof( AudioStreamBasicDescription );
1193 property.mSelector = kAudioStreamPropertyVirtualFormat;
1194 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1195 if ( result != noErr ) {
1196 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1197 errorText_ = errorStream_.str();
1201 // Set the sample rate and data format id. However, only make the
1202 // change if the sample rate is not within 1.0 of the desired
1203 // rate and the format is not linear pcm.
1204 bool updateFormat = false;
1205 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1206 description.mSampleRate = (Float64) sampleRate;
1207 updateFormat = true;
1210 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1211 description.mFormatID = kAudioFormatLinearPCM;
1212 updateFormat = true;
1215 if ( updateFormat ) {
1216 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1217 if ( result != noErr ) {
1218 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1219 errorText_ = errorStream_.str();
1224 // Now check the physical format.
1225 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1226 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1227 if ( result != noErr ) {
1228 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1229 errorText_ = errorStream_.str();
1233 //std::cout << "Current physical stream format:" << std::endl;
1234 //std::cout << "   mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1235 //std::cout << "   aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1236 //std::cout << "   bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1237 //std::cout << "   sample rate = " << description.mSampleRate << std::endl;
1239 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1240 description.mFormatID = kAudioFormatLinearPCM;
1241 //description.mSampleRate = (Float64) sampleRate;
1242 AudioStreamBasicDescription	testDescription = description;
1245 // We'll try higher bit rates first and then work our way down.
// FIX: the pair's first element must be Float32, not UInt32 — the
// fractional depths 24.2 and 24.4 below distinguish the two 24-bit-in-
// 4-byte packings; a UInt32 element would truncate both to 24 and make
// the entries indistinguishable from the packed 24-bit entry.
1246 std::vector< std::pair<Float32, UInt32> > physicalFormats;
1247 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1248 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1249 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1251 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) );   // 24-bit packed
1252 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1253 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1254 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1255 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1256 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1260 bool setPhysicalFormat = false;
1261 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1262 testDescription = description;
1263 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1264 testDescription.mFormatFlags = physicalFormats[i].second;
// FIX: logical NOT, not bitwise ~. The bitwise complement of the masked
// flag word is non-zero for packed AND unpacked formats, so the 4-byte
// frame size was applied to packed 24-bit formats as well.
1265 if ( (24 == (UInt32)physicalFormats[i].first) && !( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1266 testDescription.mBytesPerFrame =  4 * testDescription.mChannelsPerFrame;
1268 testDescription.mBytesPerFrame =  testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1269 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1270 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1271 if ( result == noErr ) {
1272 setPhysicalFormat = true;
1273 //std::cout << "Updated physical stream format:" << std::endl;
1274 //std::cout << "   mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1275 //std::cout << "   aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1276 //std::cout << "   bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1277 //std::cout << "   sample rate = " << testDescription.mSampleRate << std::endl;
1282 if ( !setPhysicalFormat ) {
1283 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1284 errorText_ = errorStream_.str();
1287 } // done setting virtual/physical formats.
1289 // Get the stream / device latency.
1291 dataSize = sizeof( UInt32 );
1292 property.mSelector = kAudioDevicePropertyLatency;
1293 if ( AudioObjectHasProperty( id, &property ) == true ) {
1294 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1295 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1297 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1298 errorText_ = errorStream_.str();
1299 error( RtAudioError::WARNING );
1303 // Byte-swapping: According to AudioHardware.h, the stream data will
1304 // always be presented in native-endian format, so we should never
1305 // need to byte swap.
1306 stream_.doByteSwap[mode] = false;
1308 // From the CoreAudio documentation, PCM data must be supplied as
1310 stream_.userFormat = format;
1311 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1313 if ( streamCount == 1 )
1314 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1315 else // multiple streams
1316 stream_.nDeviceChannels[mode] = channels;
1317 stream_.nUserChannels[mode] = channels;
1318 stream_.channelOffset[mode] = channelOffset;  // offset within a CoreAudio stream
1319 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1320 else stream_.userInterleaved = true;
1321 stream_.deviceInterleaved[mode] = true;
1322 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1324 // Set flags for buffer conversion.
1325 stream_.doConvertBuffer[mode] = false;
1326 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1327 stream_.doConvertBuffer[mode] = true;
1328 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1329 stream_.doConvertBuffer[mode] = true;
1330 if ( streamCount == 1 ) {
1331 if ( stream_.nUserChannels[mode] > 1 &&
1332 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1333 stream_.doConvertBuffer[mode] = true;
1335 else if ( monoMode && stream_.userInterleaved )
1336 stream_.doConvertBuffer[mode] = true;
1338 // Allocate our CoreHandle structure for the stream.
1339 CoreHandle *handle = 0;
1340 if ( stream_.apiHandle == 0 ) {
1342 handle = new CoreHandle;
1344 catch ( std::bad_alloc& ) {
1345 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1349 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1350 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1353 stream_.apiHandle = (void *) handle;
1356 handle = (CoreHandle *) stream_.apiHandle;
1357 handle->iStream[mode] = firstStream;
1358 handle->nStreams[mode] = streamCount;
1359 handle->id[mode] = id;
1361 // Allocate necessary internal buffers.
1362 unsigned long bufferBytes;
1363 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
// FIX: use calloc so the buffer is allocated and zero-initialized in one
// step; the previous malloc+memset wrote through the pointer BEFORE the
// NULL check below, dereferencing NULL on allocation failure.
1365 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1367 if ( stream_.userBuffer[mode] == NULL ) {
1368 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1372 // If possible, we will make use of the CoreAudio stream buffers as
1373 // "device buffers". However, we can't do this if using multiple
1375 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1377 bool makeBuffer = true;
1378 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1379 if ( mode == INPUT ) {
1380 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1381 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1382 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1387 bufferBytes *= *bufferSize;
1388 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1389 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1390 if ( stream_.deviceBuffer == NULL ) {
1391 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1397 stream_.sampleRate = sampleRate;
1398 stream_.device[mode] = device;
1399 stream_.state = STREAM_STOPPED;
1400 stream_.callbackInfo.object = (void *) this;
1402 // Setup the buffer conversion information structure.
1403 if ( stream_.doConvertBuffer[mode] ) {
1404 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1405 else setConvertInfo( mode, channelOffset );
1408 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1409 // Only one callback procedure per device.
1410 stream_.mode = DUPLEX;
1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1413 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1415 // deprecated in favor of AudioDeviceCreateIOProcID()
1416 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1418 if ( result != noErr ) {
1419 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1420 errorText_ = errorStream_.str();
1423 if ( stream_.mode == OUTPUT && mode == INPUT )
1424 stream_.mode = DUPLEX;
1426 stream_.mode = mode;
1429 // Setup the device property listener for over/underload.
1430 property.mSelector = kAudioDeviceProcessorOverload;
1431 property.mScope = kAudioObjectPropertyScopeGlobal;
1432 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Error cleanup path (reached via goto on the failures above): release
// any partially-allocated resources and mark the stream closed.
1438 pthread_cond_destroy( &handle->condition );
1440 stream_.apiHandle = 0;
1443 for ( int i=0; i<2; i++ ) {
1444 if ( stream_.userBuffer[i] ) {
1445 free( stream_.userBuffer[i] );
1446 stream_.userBuffer[i] = 0;
1450 if ( stream_.deviceBuffer ) {
1451 free( stream_.deviceBuffer );
1452 stream_.deviceBuffer = 0;
1455 stream_.state = STREAM_CLOSED;
// Close the currently open stream: remove the over/underload property
// listeners, stop the device(s) if still running, destroy the IOProc(s),
// free the user/device buffers and the CoreHandle, and mark the stream
// UNINITIALIZED/CLOSED.  Warns (and does nothing else) if no stream is open.
1459 void RtApiCore :: closeStream( void )
1461 if ( stream_.state == STREAM_CLOSED ) {
1462 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1463 error( RtAudioError::WARNING );
1467 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Tear down the output-side device (handle->id[0]).
1468 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1470 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1471 kAudioObjectPropertyScopeGlobal,
1472 kAudioObjectPropertyElementMaster };
1474 property.mSelector = kAudioDeviceProcessorOverload;
1475 property.mScope = kAudioObjectPropertyScopeGlobal;
1476 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1477 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1478 error( RtAudioError::WARNING );
1481 if ( stream_.state == STREAM_RUNNING )
1482 AudioDeviceStop( handle->id[0], callbackHandler );
1483 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1484 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1486 // deprecated in favor of AudioDeviceDestroyIOProcID()
1487 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Tear down the input-side device (handle->id[1]) only when it is a
// distinct device — a same-device duplex stream shares one IOProc.
1491 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1493 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1494 kAudioObjectPropertyScopeGlobal,
1495 kAudioObjectPropertyElementMaster };
1497 property.mSelector = kAudioDeviceProcessorOverload;
1498 property.mScope = kAudioObjectPropertyScopeGlobal;
1499 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1500 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1501 error( RtAudioError::WARNING );
1504 if ( stream_.state == STREAM_RUNNING )
1505 AudioDeviceStop( handle->id[1], callbackHandler );
1506 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1507 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1509 // deprecated in favor of AudioDeviceDestroyIOProcID()
1510 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers (index 0 = output, 1 = input).
1514 for ( int i=0; i<2; i++ ) {
1515 if ( stream_.userBuffer[i] ) {
1516 free( stream_.userBuffer[i] );
1517 stream_.userBuffer[i] = 0;
1521 if ( stream_.deviceBuffer ) {
1522 free( stream_.deviceBuffer );
1523 stream_.deviceBuffer = 0;
1526 // Destroy pthread condition variable.
1527 pthread_cond_destroy( &handle->condition );
1529 stream_.apiHandle = 0;
1531 stream_.mode = UNINITIALIZED;
1532 stream_.state = STREAM_CLOSED;
// Start the CoreAudio device(s) for the open stream and mark it RUNNING.
// Issues a WARNING (no-op) if the stream is already running; raises
// SYSTEM_ERROR if AudioDeviceStart fails on either device.
1535 void RtApiCore :: startStream( void )
1538 if ( stream_.state == STREAM_RUNNING ) {
1539 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1540 error( RtAudioError::WARNING );
1544 OSStatus result = noErr;
1545 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output device (also drives a same-device duplex stream).
1546 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1548 result = AudioDeviceStart( handle->id[0], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1551 errorText_ = errorStream_.str();
// Input device is started separately only when it differs from the
// output device.
1556 if ( stream_.mode == INPUT ||
1557 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1559 result = AudioDeviceStart( handle->id[1], callbackHandler );
1560 if ( result != noErr ) {
1561 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1562 errorText_ = errorStream_.str();
// Reset the drain bookkeeping used by stopStream()/callbackEvent().
1567 handle->drainCounter = 0;
1568 handle->internalDrain = false;
1569 stream_.state = STREAM_RUNNING;
1572 if ( result == noErr ) return;
1573 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after allowing the output to drain.  Setting
// drainCounter = 2 tells callbackEvent() to write zeros to the output and
// then signal the condition variable; we block on that signal before
// calling AudioDeviceStop so the callback is not cut off mid-buffer.
// Warns (no-op) if already stopped; raises SYSTEM_ERROR on failure.
1576 void RtApiCore :: stopStream( void )
1579 if ( stream_.state == STREAM_STOPPED ) {
1580 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1581 error( RtAudioError::WARNING );
1585 OSStatus result = noErr;
1586 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1587 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain was requested yet (e.g. not an
// abort); request one and wait for the callback to acknowledge.
1589 if ( handle->drainCounter == 0 ) {
1590 handle->drainCounter = 2;
1591 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1594 result = AudioDeviceStop( handle->id[0], callbackHandler );
1595 if ( result != noErr ) {
1596 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1597 errorText_ = errorStream_.str();
// Stop the input device only when it is distinct from the output device.
1602 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1604 result = AudioDeviceStop( handle->id[1], callbackHandler );
1605 if ( result != noErr ) {
1606 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1607 errorText_ = errorStream_.str();
1612 stream_.state = STREAM_STOPPED;
1615 if ( result == noErr ) return;
1616 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately, without waiting for the user callback's
// pending output to play out.  Setting drainCounter = 2 makes
// callbackEvent() substitute zeros for output data right away.
// Warns (no-op) if the stream is already stopped.
1619 void RtApiCore :: abortStream( void )
1622 if ( stream_.state == STREAM_STOPPED ) {
1623 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1624 error( RtAudioError::WARNING );
1628 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1629 handle->drainCounter = 2;
1634 // This function will be called by a spawned thread when the user
1635 // callback function signals that the stream should be stopped or
1636 // aborted.  It is better to handle it this way because the
1637 // callbackEvent() function probably should return before the AudioDeviceStop()
1638 // function is called.
// Thread entry point: ptr is the stream's CallbackInfo, whose 'object'
// member is the owning RtApiCore instance.
1639 static void *coreStopStream( void *ptr )
1641 CallbackInfo *info = (CallbackInfo *) ptr;
1642 RtApiCore *object = (RtApiCore *) info->object;
1644 object->stopStream();
1645 pthread_exit( NULL );
// Per-buffer CoreAudio IOProc handler.  Invokes the user callback to
// produce/consume one buffer, then shuffles data between the user/device
// buffers and the CoreAudio stream buffers for this device, handling the
// one-stream, mono (one stream per channel), and multi-stream interleaved
// layouts.  Also drives the drain/stop handshake used by stopStream()
// and abortStream() via handle->drainCounter.
1648 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1649 const AudioBufferList *inBufferList,
1650 const AudioBufferList *outBufferList )
1652 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1653 if ( stream_.state == STREAM_CLOSED ) {
1654 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1655 error( RtAudioError::WARNING );
1659 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1660 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1662 // Check if we were draining the stream and signal is finished.
1663 if ( handle->drainCounter > 3 ) {
1664 ThreadHandle threadId;
1666 stream_.state = STREAM_STOPPING;
1667 if ( handle->internalDrain == true )
// Internal drain (callback returned 1): stop from a helper thread so
// this IOProc can return before AudioDeviceStop() runs.
1668 pthread_create( &threadId, NULL, coreStopStream, info );
1669 else // external call to stopStream()
1670 pthread_cond_signal( &handle->condition );
1674 AudioDeviceID outputDevice = handle->id[0];
1676 // Invoke user callback to get fresh output data UNLESS we are
1677 // draining stream or duplex mode AND the input/output devices are
1678 // different AND this function is called for the input device.
1679 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1680 RtAudioCallback callback = (RtAudioCallback) info->callback;
1681 double streamTime = getStreamTime();
1682 RtAudioStreamStatus status = 0;
// Report and clear any xrun flags set by the overload listener.
1683 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1684 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1685 handle->xrun[0] = false;
1687 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1688 status |= RTAUDIO_INPUT_OVERFLOW;
1689 handle->xrun[1] = false;
// User callback: 0 = continue, 1 = drain and stop, 2 = abort now.
1692 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1693 stream_.bufferSize, streamTime, status, info->userData );
1694 if ( cbReturnValue == 2 ) {
1695 stream_.state = STREAM_STOPPING;
1696 handle->drainCounter = 2;
1700 else if ( cbReturnValue == 1 ) {
1701 handle->drainCounter = 1;
1702 handle->internalDrain = true;
// ---- Output side: move user data into the CoreAudio output buffers ----
1706 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1708 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1710 if ( handle->nStreams[0] == 1 ) {
1711 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1713 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1715 else { // fill multiple streams with zeros
1716 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1717 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1719 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1723 else if ( handle->nStreams[0] == 1 ) {
1724 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1725 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1726 stream_.userBuffer[0], stream_.convertInfo[0] );
1728 else { // copy from user buffer
1729 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1730 stream_.userBuffer[0],
1731 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1734 else { // fill multiple streams
1735 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1736 if ( stream_.doConvertBuffer[0] ) {
1737 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1738 inBuffer = (Float32 *) stream_.deviceBuffer;
1741 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1742 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1743 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1744 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1745 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1748 else { // fill multiple multi-channel streams with interleaved data
1749 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1752 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1753 UInt32 inChannels = stream_.nUserChannels[0];
1754 if ( stream_.doConvertBuffer[0] ) {
1755 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1756 inChannels = stream_.nDeviceChannels[0];
// inOffset is the element stride between adjacent channels of one frame.
1759 if ( inInterleaved ) inOffset = 1;
1760 else inOffset = stream_.bufferSize;
1762 channelsLeft = inChannels;
1763 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1765 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1766 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1769 // Account for possible channel offset in first stream
1770 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1771 streamChannels -= stream_.channelOffset[0];
1772 outJump = stream_.channelOffset[0];
1776 // Account for possible unfilled channels at end of the last stream
1777 if ( streamChannels > channelsLeft ) {
1778 outJump = streamChannels - channelsLeft;
1779 streamChannels = channelsLeft;
1782 // Determine input buffer offsets and skips
1783 if ( inInterleaved ) {
1784 inJump = inChannels;
1785 in += inChannels - channelsLeft;
1789 in += (inChannels - channelsLeft) * inOffset;
1792 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1793 for ( unsigned int j=0; j<streamChannels; j++ ) {
1794 *out++ = in[j*inOffset];
1799 channelsLeft -= streamChannels;
1805 // Don't bother draining input
1806 if ( handle->drainCounter ) {
// Counting up past 3 triggers the stop/signal branch at the top of the
// next invocation.
1807 handle->drainCounter++;
// ---- Input side: move CoreAudio input buffers into the user buffer ----
1811 AudioDeviceID inputDevice;
1812 inputDevice = handle->id[1];
1813 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1815 if ( handle->nStreams[1] == 1 ) {
1816 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1817 convertBuffer( stream_.userBuffer[1],
1818 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1819 stream_.convertInfo[1] );
1821 else { // copy to user buffer
1822 memcpy( stream_.userBuffer[1],
1823 inBufferList->mBuffers[handle->iStream[1]].mData,
1824 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1827 else { // read from multiple streams
1828 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1829 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1831 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1832 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1833 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1834 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1835 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1838 else { // read from multiple multi-channel streams
1839 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1842 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1843 UInt32 outChannels = stream_.nUserChannels[1];
1844 if ( stream_.doConvertBuffer[1] ) {
1845 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1846 outChannels = stream_.nDeviceChannels[1];
// outOffset is the element stride between adjacent channels of one frame.
1849 if ( outInterleaved ) outOffset = 1;
1850 else outOffset = stream_.bufferSize;
1852 channelsLeft = outChannels;
1853 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1855 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1856 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1859 // Account for possible channel offset in first stream
1860 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1861 streamChannels -= stream_.channelOffset[1];
1862 inJump = stream_.channelOffset[1];
1866 // Account for possible unread channels at end of the last stream
1867 if ( streamChannels > channelsLeft ) {
1868 inJump = streamChannels - channelsLeft;
1869 streamChannels = channelsLeft;
1872 // Determine output buffer offsets and skips
1873 if ( outInterleaved ) {
1874 outJump = outChannels;
1875 out += outChannels - channelsLeft;
1879 out += (outChannels - channelsLeft) * outOffset;
1882 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1883 for ( unsigned int j=0; j<streamChannels; j++ ) {
1884 out[j*outOffset] = *in++;
1889 channelsLeft -= streamChannels;
1893 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1894 convertBuffer( stream_.userBuffer[1],
1895 stream_.deviceBuffer,
1896 stream_.convertInfo[1] );
1902 //MUTEX_UNLOCK( &stream_.mutex );
1904 RtApi::tickStreamTime();
1908 const char* RtApiCore :: getErrorCode( OSStatus code )
1912 case kAudioHardwareNotRunningError:
1913 return "kAudioHardwareNotRunningError";
1915 case kAudioHardwareUnspecifiedError:
1916 return "kAudioHardwareUnspecifiedError";
1918 case kAudioHardwareUnknownPropertyError:
1919 return "kAudioHardwareUnknownPropertyError";
1921 case kAudioHardwareBadPropertySizeError:
1922 return "kAudioHardwareBadPropertySizeError";
1924 case kAudioHardwareIllegalOperationError:
1925 return "kAudioHardwareIllegalOperationError";
1927 case kAudioHardwareBadObjectError:
1928 return "kAudioHardwareBadObjectError";
1930 case kAudioHardwareBadDeviceError:
1931 return "kAudioHardwareBadDeviceError";
1933 case kAudioHardwareBadStreamError:
1934 return "kAudioHardwareBadStreamError";
1936 case kAudioHardwareUnsupportedOperationError:
1937 return "kAudioHardwareUnsupportedOperationError";
1939 case kAudioDeviceUnsupportedFormatError:
1940 return "kAudioDeviceUnsupportedFormatError";
1942 case kAudioDevicePermissionsError:
1943 return "kAudioDevicePermissionsError";
1946 return "CoreAudio unknown error";
1950 //******************** End of __MACOSX_CORE__ *********************//
1953 #if defined(__UNIX_JACK__)
1955 // JACK is a low-latency audio server, originally written for the
1956 // GNU/Linux operating system and now also ported to OS-X. It can
1957 // connect a number of different applications to an audio device, as
1958 // well as allowing them to share audio between themselves.
1960 // When using JACK with RtAudio, "devices" refer to JACK clients that
1961 // have ports connected to the server. The JACK server is typically
1962 // started in a terminal as follows:
1964 //    jackd -d alsa -d hw:0
1966 // or through an interface program such as qjackctl. Many of the
1967 // parameters normally set for a stream are fixed by the JACK server
1968 // and can be specified when the JACK server is started. In
1971 //    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1973 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1974 // frames, and number of buffers = 4. Once the server is running, it
1975 // is not possible to override these values. If the values are not
1976 // specified in the command-line, the JACK server uses default values.
1978 // The JACK server does not have to be running when an instance of
1979 // RtApiJack is created, though the function getDeviceCount() will
1980 // report 0 devices found until JACK has been started. When no
1981 // devices are available (i.e., the JACK server is not running), a
1982 // stream cannot be opened.
1984 #include <jack/jack.h>
1988 // A structure to hold various information related to the Jack API
// implementation.  One instance is stored in stream_.apiHandle while a
// JACK stream is open.
// NOTE(review): this extract appears to be missing some struct lines
// (e.g. the struct header and the xrun member initialized below) —
// verify against the complete source file.
1991 jack_client_t *client;
1992 jack_port_t **ports[2]; // per-mode arrays of registered JACK ports ([0] = playback, [1] = capture)
1993 std::string deviceName[2]; // JACK client (device) name for each mode
1995 pthread_cond_t condition; // signaled by the callback when output draining completes (see stopStream)
1996 int drainCounter; // Tracks callback counts when draining
1997 bool internalDrain; // Indicates if stop is initiated from callback or not.
2000 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2003 #if !defined(__RTAUDIO_DEBUG__)
// No-op JACK error handler: installed in the RtApiJack constructor to
// suppress JACK's internal error reporting in non-debug builds.
2004 static void jackSilentError( const char * ) {};
// Constructor: enables port autoconnection by default and, in non-debug
// builds, silences JACK's internal error messages.
2007 RtApiJack :: RtApiJack()
2008 :shouldAutoconnect_(true) {
2009 // Nothing to do here.
2010 #if !defined(__RTAUDIO_DEBUG__)
2011 // Turn off Jack's internal error reporting.
2012 jack_set_error_function( &jackSilentError );
// Destructor: ensure any open stream is closed before the object dies.
2016 RtApiJack :: ~RtApiJack()
2018 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count JACK "devices" by opening a temporary client and grouping the
// server's port names by their prefix (the text before the first colon).
// Returns 0 when the JACK server is not running.
2021 unsigned int RtApiJack :: getDeviceCount( void )
2023 // See if we can become a jack client.
2024 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2025 jack_status_t *status = NULL;
2026 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2027 if ( client == 0 ) return 0;
2030 std::string port, previousPort;
2031 unsigned int nChannels = 0, nDevices = 0;
2032 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2034 // Parse the port names up to the first colon (:).
// NOTE(review): loop-structure lines appear missing from this extract
// (do/while opening, nDevices increment) — verify against full source.
2037 port = (char *) ports[ nChannels ];
2038 iColon = port.find(":");
2039 if ( iColon != std::string::npos ) {
2040 port = port.substr( 0, iColon + 1 );
2041 if ( port != previousPort ) {
2043 previousPort = port;
2046 } while ( ports[++nChannels] );
// Temporary client is no longer needed once ports have been scanned.
2050 jack_client_close( client );
// Probe one JACK "device" (port-name prefix): fills in name, channel
// counts, the server's fixed sample rate, and the native format
// (always 32-bit float for JACK).  Opens and closes its own temporary
// client; issues a WARNING (and returns an unprobed info) when the
// server is unreachable.
2054 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2056 RtAudio::DeviceInfo info;
2057 info.probed = false;
2059 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2060 jack_status_t *status = NULL;
2061 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2062 if ( client == 0 ) {
2063 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2064 error( RtAudioError::WARNING );
2069 std::string port, previousPort;
2070 unsigned int nPorts = 0, nDevices = 0;
2071 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2073 // Parse the port names up to the first colon (:).
2076 port = (char *) ports[ nPorts ];
2077 iColon = port.find(":");
2078 if ( iColon != std::string::npos ) {
2079 port = port.substr( 0, iColon );
2080 if ( port != previousPort ) {
// The device index corresponds to the order of first appearance of a prefix.
2081 if ( nDevices == device ) info.name = port;
2083 previousPort = port;
2086 } while ( ports[++nPorts] );
2090 if ( device >= nDevices ) {
2091 jack_client_close( client );
2092 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2093 error( RtAudioError::INVALID_USE );
2097 // Get the current jack server sample rate.
2098 info.sampleRates.clear();
// JACK runs at a single fixed rate, so that is the only supported rate.
2100 info.preferredSampleRate = jack_get_sample_rate( client );
2101 info.sampleRates.push_back( info.preferredSampleRate );
2103 // Count the available ports containing the client name as device
2104 // channels. Jack "input ports" equal RtAudio output channels.
2105 unsigned int nChannels = 0;
2106 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2108 while ( ports[ nChannels ] ) nChannels++;
2110 info.outputChannels = nChannels;
2113 // Jack "output ports" equal RtAudio input channels.
2115 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2117 while ( ports[ nChannels ] ) nChannels++;
2119 info.inputChannels = nChannels;
2122 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2123 jack_client_close(client);
2124 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2125 error( RtAudioError::WARNING );
2129 // If device opens for both playback and capture, we determine the channels.
2130 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2131 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2133 // Jack always uses 32-bit floats.
2134 info.nativeFormats = RTAUDIO_FLOAT32;
2136 // Jack doesn't provide default devices so we'll use the first available one.
2137 if ( device == 0 && info.outputChannels > 0 )
2138 info.isDefaultOutput = true;
2139 if ( device == 0 && info.inputChannels > 0 )
2140 info.isDefaultInput = true;
2142 jack_client_close(client);
2147 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2149 CallbackInfo *info = (CallbackInfo *) infoPointer;
2151 RtApiJack *object = (RtApiJack *) info->object;
2152 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2157 // This function will be called by a spawned thread when the Jack
2158 // server signals that it is shutting down. It is necessary to handle
2159 // it this way because the jackShutdown() function must return before
2160 // the jack_deactivate() function (in closeStream()) will return.
2161 static void *jackCloseStream( void *ptr )
2163 CallbackInfo *info = (CallbackInfo *) ptr;
2164 RtApiJack *object = (RtApiJack *) info->object;
2166 object->closeStream();
2168 pthread_exit( NULL );
2170 static void jackShutdown( void *infoPointer )
2172 CallbackInfo *info = (CallbackInfo *) infoPointer;
2173 RtApiJack *object = (RtApiJack *) info->object;
2175 // Check current stream state. If stopped, then we'll assume this
2176 // was called as a result of a call to RtApiJack::stopStream (the
2177 // deactivation of a client handle causes this function to be called).
2178 // If not, we'll assume the Jack server is shutting down or some
2179 // other problem occurred and we should close the stream.
2180 if ( object->isStreamRunning() == false ) return;
2182 ThreadHandle threadId;
2183 pthread_create( &threadId, NULL, jackCloseStream, info );
2184 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2187 static int jackXrun( void *infoPointer )
2189 JackHandle *handle = *((JackHandle **) infoPointer);
2191 if ( handle->ports[0] ) handle->xrun[0] = true;
2192 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a JACK stream: connect to the
// server, validate device/channels/sample rate against the server's
// fixed configuration, allocate the JackHandle plus user/device buffers,
// and register the client's audio ports.  Returns FAILURE and cleans up
// on any error.
// NOTE(review): this extract is a lossy sample — several structural
// lines (braces, else branches, goto/error labels) appear missing;
// verify the surrounding control flow against the full source.
2197 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2198 unsigned int firstChannel, unsigned int sampleRate,
2199 RtAudioFormat format, unsigned int *bufferSize,
2200 RtAudio::StreamOptions *options )
2202 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2204 // Look for jack server and try to become a client (only do once per stream).
2205 jack_client_t *client = 0;
2206 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2207 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2208 jack_status_t *status = NULL;
// Honor a user-supplied client name when one was given via StreamOptions.
2209 if ( options && !options->streamName.empty() )
2210 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2212 client = jack_client_open( "RtApiJack", jackoptions, status );
2213 if ( client == 0 ) {
2214 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2215 error( RtAudioError::WARNING );
2220 // The handle must have been created on an earlier pass.
2221 client = handle->client;
2225 std::string port, previousPort, deviceName;
2226 unsigned int nPorts = 0, nDevices = 0;
2227 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2229 // Parse the port names up to the first colon (:).
2232 port = (char *) ports[ nPorts ];
2233 iColon = port.find(":");
2234 if ( iColon != std::string::npos ) {
2235 port = port.substr( 0, iColon );
2236 if ( port != previousPort ) {
2237 if ( nDevices == device ) deviceName = port;
2239 previousPort = port;
2242 } while ( ports[++nPorts] );
2246 if ( device >= nDevices ) {
2247 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Port direction is inverted between JACK and RtAudio: RtAudio output
// connects to JACK input ports, and vice versa.
2251 unsigned long flag = JackPortIsInput;
2252 if ( mode == INPUT ) flag = JackPortIsOutput;
2254 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2255 // Count the available ports containing the client name as device
2256 // channels. Jack "input ports" equal RtAudio output channels.
2257 unsigned int nChannels = 0;
2258 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2260 while ( ports[ nChannels ] ) nChannels++;
2263 // Compare the jack ports for specified client to the requested number of channels.
2264 if ( nChannels < (channels + firstChannel) ) {
2265 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2266 errorText_ = errorStream_.str();
2271 // Check the jack server sample rate.
2272 unsigned int jackRate = jack_get_sample_rate( client );
// The server rate is fixed; a mismatch is a hard failure, not a resample.
2273 if ( sampleRate != jackRate ) {
2274 jack_client_close( client );
2275 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2276 errorText_ = errorStream_.str();
2279 stream_.sampleRate = jackRate;
2281 // Get the latency of the JACK port.
2282 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2283 if ( ports[ firstChannel ] ) {
2285 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2286 // the range (usually the min and max are equal)
2287 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2288 // get the latency range
2289 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2290 // be optimistic, use the min!
2291 stream_.latency[mode] = latrange.min;
2292 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2296 // The jack server always uses 32-bit floating-point data.
2297 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2298 stream_.userFormat = format;
2300 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2301 else stream_.userInterleaved = true;
2303 // Jack always uses non-interleaved buffers.
2304 stream_.deviceInterleaved[mode] = false;
2306 // Jack always provides host byte-ordered data.
2307 stream_.doByteSwap[mode] = false;
2309 // Get the buffer size. The buffer size and number of buffers
2310 // (periods) is set when the jack server is started.
2311 stream_.bufferSize = (int) jack_get_buffer_size( client );
2312 *bufferSize = stream_.bufferSize;
2314 stream_.nDeviceChannels[mode] = channels;
2315 stream_.nUserChannels[mode] = channels;
2317 // Set flags for buffer conversion.
2318 stream_.doConvertBuffer[mode] = false;
// Conversion is required when the sample format differs or when a
// multi-channel user buffer is interleaved (JACK is non-interleaved).
2319 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2320 stream_.doConvertBuffer[mode] = true;
2321 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2322 stream_.nUserChannels[mode] > 1 )
2323 stream_.doConvertBuffer[mode] = true;
2325 // Allocate our JackHandle structure for the stream.
2326 if ( handle == 0 ) {
2328 handle = new JackHandle;
2330 catch ( std::bad_alloc& ) {
2331 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2335 if ( pthread_cond_init(&handle->condition, NULL) ) {
2336 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2339 stream_.apiHandle = (void *) handle;
2340 handle->client = client;
2342 handle->deviceName[mode] = deviceName;
2344 // Allocate necessary internal buffers.
2345 unsigned long bufferBytes;
2346 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2347 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2348 if ( stream_.userBuffer[mode] == NULL ) {
2349 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2353 if ( stream_.doConvertBuffer[mode] ) {
2355 bool makeBuffer = true;
2356 if ( mode == OUTPUT )
2357 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2358 else { // mode == INPUT
2359 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
// In duplex mode, reuse the existing (output) device buffer when it is
// already at least as large as the input side needs.
2360 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2361 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2362 if ( bufferBytes < bytesOut ) makeBuffer = false;
2367 bufferBytes *= *bufferSize;
2368 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2369 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2370 if ( stream_.deviceBuffer == NULL ) {
2371 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2377 // Allocate memory for the Jack ports (channels) identifiers.
2378 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2379 if ( handle->ports[mode] == NULL ) {
2380 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2384 stream_.device[mode] = device;
2385 stream_.channelOffset[mode] = firstChannel;
2386 stream_.state = STREAM_STOPPED;
2387 stream_.callbackInfo.object = (void *) this;
2389 if ( stream_.mode == OUTPUT && mode == INPUT )
2390 // We had already set up the stream for output.
2391 stream_.mode = DUPLEX;
2393 stream_.mode = mode;
// Install the process/xrun/shutdown callbacks on the (first) pass that
// created the client.
2394 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2395 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2396 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2399 // Register our ports.
2401 if ( mode == OUTPUT ) {
2402 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2403 snprintf( label, 64, "outport %d", i );
2404 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2405 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2409 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2410 snprintf( label, 64, "inport %d", i );
2411 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2412 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2416 // Setup the buffer conversion information structure. We don't use
2417 // buffers to do channel offsets, so we override that parameter
2419 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2421 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error path: tear down everything allocated above so a failed open
// leaves the stream structure clean.
2427 pthread_cond_destroy( &handle->condition );
2428 jack_client_close( handle->client );
2430 if ( handle->ports[0] ) free( handle->ports[0] );
2431 if ( handle->ports[1] ) free( handle->ports[1] );
2434 stream_.apiHandle = 0;
2437 for ( int i=0; i<2; i++ ) {
2438 if ( stream_.userBuffer[i] ) {
2439 free( stream_.userBuffer[i] );
2440 stream_.userBuffer[i] = 0;
2444 if ( stream_.deviceBuffer ) {
2445 free( stream_.deviceBuffer );
2446 stream_.deviceBuffer = 0;
// Close the open stream: deactivate and close the JACK client, destroy
// the handle (ports, condition variable), free the user/device buffers,
// and reset the stream state to CLOSED.
2452 void RtApiJack :: closeStream( void )
2454 if ( stream_.state == STREAM_CLOSED ) {
2455 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2456 error( RtAudioError::WARNING );
2460 JackHandle *handle = (JackHandle *) stream_.apiHandle;
// Deactivate before closing so the process callback stops first.
2463 if ( stream_.state == STREAM_RUNNING )
2464 jack_deactivate( handle->client );
2466 jack_client_close( handle->client );
2470 if ( handle->ports[0] ) free( handle->ports[0] );
2471 if ( handle->ports[1] ) free( handle->ports[1] );
2472 pthread_cond_destroy( &handle->condition );
2474 stream_.apiHandle = 0;
2477 for ( int i=0; i<2; i++ ) {
2478 if ( stream_.userBuffer[i] ) {
2479 free( stream_.userBuffer[i] );
2480 stream_.userBuffer[i] = 0;
2484 if ( stream_.deviceBuffer ) {
2485 free( stream_.deviceBuffer );
2486 stream_.deviceBuffer = 0;
2489 stream_.mode = UNINITIALIZED;
2490 stream_.state = STREAM_CLOSED;
// Activate the JACK client and (unless autoconnection was disabled via
// RTAUDIO_JACK_DONT_CONNECT) wire our registered ports to the target
// device's ports, honoring the per-mode channel offset.
2493 void RtApiJack :: startStream( void )
2496 if ( stream_.state == STREAM_RUNNING ) {
2497 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2498 error( RtAudioError::WARNING );
2502 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2503 int result = jack_activate( handle->client );
2505 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2511 // Get the list of available ports.
2512 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
// RtAudio playback connects to the device's JACK *input* ports.
2514 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2515 if ( ports == NULL) {
2516 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2520 // Now make the port connections. Since RtAudio wasn't designed to
2521 // allow the user to select particular channels of a device, we'll
2522 // just open the first "nChannels" ports with offset.
2523 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2525 if ( ports[ stream_.channelOffset[0] + i ] )
2526 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2529 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2536 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
// RtAudio capture connects from the device's JACK *output* ports.
2538 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2539 if ( ports == NULL) {
2540 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2544 // Now make the port connections. See note above.
2545 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2547 if ( ports[ stream_.channelOffset[1] + i ] )
2548 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2551 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain bookkeeping for the new run.
2558 handle->drainCounter = 0;
2559 handle->internalDrain = false;
2560 stream_.state = STREAM_RUNNING;
2563 if ( result == 0 ) return;
2564 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, letting any pending output drain first: set the
// drain counter and block on the handle's condition variable until the
// process callback signals completion, then deactivate the client.
2567 void RtApiJack :: stopStream( void )
2570 if ( stream_.state == STREAM_STOPPED ) {
2571 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2572 error( RtAudioError::WARNING );
2576 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2577 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no stop is in progress yet; request an
// external drain (2) and wait for callbackEvent to signal us.
2579 if ( handle->drainCounter == 0 ) {
2580 handle->drainCounter = 2;
2581 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2585 jack_deactivate( handle->client );
2586 stream_.state = STREAM_STOPPED;
// Abort the stream without draining: setting drainCounter to 2 makes
// the process callback write zeros instead of user data.
// NOTE(review): the trailing call into stopStream() appears to be
// missing from this extract — verify against the full source.
2589 void RtApiJack :: abortStream( void )
2592 if ( stream_.state == STREAM_STOPPED ) {
2593 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2594 error( RtAudioError::WARNING );
2598 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2599 handle->drainCounter = 2;
2604 // This function will be called by a spawned thread when the user
2605 // callback function signals that the stream should be stopped or
2606 // aborted. It is necessary to handle it this way because the
2607 // callbackEvent() function must return before the jack_deactivate()
2608 // function will return.
2609 static void *jackStopStream( void *ptr )
2611 CallbackInfo *info = (CallbackInfo *) ptr;
2612 RtApiJack *object = (RtApiJack *) info->object;
2614 object->stopStream();
2615 pthread_exit( NULL );
2618 bool RtApiJack :: callbackEvent( unsigned long nframes )
2620 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2621 if ( stream_.state == STREAM_CLOSED ) {
2622 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2623 error( RtAudioError::WARNING );
2626 if ( stream_.bufferSize != nframes ) {
2627 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2628 error( RtAudioError::WARNING );
2632 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2633 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2635 // Check if we were draining the stream and signal is finished.
2636 if ( handle->drainCounter > 3 ) {
2637 ThreadHandle threadId;
2639 stream_.state = STREAM_STOPPING;
2640 if ( handle->internalDrain == true )
2641 pthread_create( &threadId, NULL, jackStopStream, info );
2643 pthread_cond_signal( &handle->condition );
2647 // Invoke user callback first, to get fresh output data.
2648 if ( handle->drainCounter == 0 ) {
2649 RtAudioCallback callback = (RtAudioCallback) info->callback;
2650 double streamTime = getStreamTime();
2651 RtAudioStreamStatus status = 0;
2652 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2653 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2654 handle->xrun[0] = false;
2656 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2657 status |= RTAUDIO_INPUT_OVERFLOW;
2658 handle->xrun[1] = false;
2660 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2661 stream_.bufferSize, streamTime, status, info->userData );
2662 if ( cbReturnValue == 2 ) {
2663 stream_.state = STREAM_STOPPING;
2664 handle->drainCounter = 2;
2666 pthread_create( &id, NULL, jackStopStream, info );
2669 else if ( cbReturnValue == 1 ) {
2670 handle->drainCounter = 1;
2671 handle->internalDrain = true;
2675 jack_default_audio_sample_t *jackbuffer;
2676 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2677 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2679 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2681 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2682 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2683 memset( jackbuffer, 0, bufferBytes );
2687 else if ( stream_.doConvertBuffer[0] ) {
2689 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2691 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2692 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2693 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2696 else { // no buffer conversion
2697 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2698 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2699 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2704 // Don't bother draining input
2705 if ( handle->drainCounter ) {
2706 handle->drainCounter++;
2710 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2712 if ( stream_.doConvertBuffer[1] ) {
2713 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2714 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2715 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2717 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2719 else { // no buffer conversion
2720 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2721 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2722 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2728 RtApi::tickStreamTime();
2731 //******************** End of __UNIX_JACK__ *********************//
2734 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2736 // The ASIO API is designed around a callback scheme, so this
2737 // implementation is similar to that used for OS-X CoreAudio and Linux
2738 // Jack. The primary constraint with ASIO is that it only allows
2739 // access to a single driver at a time. Thus, it is not possible to
2740 // have more than one simultaneous RtAudio stream.
2742 // This implementation also requires a number of external ASIO files
2743 // and a few global variables. The ASIO callback scheme does not
2744 // allow for the passing of user data, so we must create a global
2745 // pointer to our callbackInfo structure.
2747 // On unix systems, we make use of a pthread condition variable.
2748 // Since there is no equivalent in Windows, I hacked something based
2749 // on information found in
2750 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2752 #include "asiosys.h"
2754 #include "iasiothiscallresolver.h"
2755 #include "asiodrivers.h"
// ASIO globals: the callback scheme cannot carry user data, so driver
// state and the CallbackInfo pointer must live at file scope.
2758 static AsioDrivers drivers;
2759 static ASIOCallbacks asioCallbacks;
2760 static ASIODriverInfo driverInfo;
2761 static CallbackInfo *asioCallbackInfo;
2762 static bool asioXRun;
// Per-stream ASIO handle members.
// NOTE(review): the struct header and some members appear missing from
// this extract — verify against the complete source file.
2765 int drainCounter; // Tracks callback counts when draining
2766 bool internalDrain; // Indicates if stop is initiated from callback or not.
2767 ASIOBufferInfo *bufferInfos;
2771 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2774 // Function declarations (definitions at end of section)
2775 static const char* getAsioErrorString( ASIOError result );
2776 static void sampleRateChanged( ASIOSampleRate sRate );
2777 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM (single-threaded apartment required by
// ASIO), reset the driver list, and prepare the ASIODriverInfo used by
// later ASIOInit calls.
2779 RtApiAsio :: RtApiAsio()
2781 // ASIO cannot run on a multi-threaded apartment. You can call
2782 // CoInitialize beforehand, but it must be for apartment threading
2783 // (in which case, CoInitialize will return S_FALSE here).
2784 coInitialized_ = false;
2785 HRESULT hr = CoInitialize( NULL );
2787 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2788 error( RtAudioError::WARNING );
// Remember that we own the COM init so the destructor can CoUninitialize.
2790 coInitialized_ = true;
2792 drivers.removeCurrentDriver();
2793 driverInfo.asioVersion = 2;
2795 // See note in DirectSound implementation about GetDesktopWindow().
2796 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream and release COM if we initialized it.
2799 RtApiAsio :: ~RtApiAsio()
2801 if ( stream_.state != STREAM_CLOSED ) closeStream();
2802 if ( coInitialized_ ) CoUninitialize();
// Report the number of installed ASIO drivers (each driver is a device).
2805 unsigned int RtApiAsio :: getDeviceCount( void )
2807 return (unsigned int) drivers.asioGetNumDev();
// Probe the capabilities (channel counts, supported sample rates, native
// data formats) of the ASIO driver at index 'device'.  Bad device ids
// raise INVALID_USE; probe failures raise WARNING and leave info.probed
// false.  While a stream is open, results cached by saveDeviceInfo()
// are returned instead, because ASIO cannot probe other drivers then.
2810 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2812 RtAudio::DeviceInfo info;
2813 info.probed = false;
2816 unsigned int nDevices = getDeviceCount();
2817 if ( nDevices == 0 ) {
2818 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2819 error( RtAudioError::INVALID_USE );
2823 if ( device >= nDevices ) {
2824 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2825 error( RtAudioError::INVALID_USE );
2829 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2830 if ( stream_.state != STREAM_CLOSED ) {
2831 if ( device >= devices_.size() ) {
2832 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2833 error( RtAudioError::WARNING );
2836 return devices_[ device ];
// Resolve the driver name, then load and initialize the driver so it can
// be queried.  Each failure below is reported as a WARNING.
2839 char driverName[32];
2840 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2841 if ( result != ASE_OK ) {
2842 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2843 errorText_ = errorStream_.str();
2844 error( RtAudioError::WARNING );
2848 info.name = driverName;
2850 if ( !drivers.loadDriver( driverName ) ) {
2851 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2852 errorText_ = errorStream_.str();
2853 error( RtAudioError::WARNING );
2857 result = ASIOInit( &driverInfo );
2858 if ( result != ASE_OK ) {
2859 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2860 errorText_ = errorStream_.str();
2861 error( RtAudioError::WARNING );
2865 // Determine the device channel information.
2866 long inputChannels, outputChannels;
2867 result = ASIOGetChannels( &inputChannels, &outputChannels );
2868 if ( result != ASE_OK ) {
2869 drivers.removeCurrentDriver();
2870 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2871 errorText_ = errorStream_.str();
2872 error( RtAudioError::WARNING );
2876 info.outputChannels = outputChannels;
2877 info.inputChannels = inputChannels;
// Duplex capability is limited by the smaller of the two directions.
2878 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2879 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2881 // Determine the supported sample rates.
2882 info.sampleRates.clear();
2883 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2884 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2885 if ( result == ASE_OK ) {
2886 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
2888 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2889 info.preferredSampleRate = SAMPLE_RATES[i];
2893 // Determine supported data types ... just check first channel and assume rest are the same.
2894 ASIOChannelInfo channelInfo;
2895 channelInfo.channel = 0;
2896 channelInfo.isInput = true;
2897 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2898 result = ASIOGetChannelInfo( &channelInfo );
2899 if ( result != ASE_OK ) {
2900 drivers.removeCurrentDriver();
2901 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2902 errorText_ = errorStream_.str();
2903 error( RtAudioError::WARNING );
// Map the ASIO sample type onto the corresponding RtAudio format flag.
2907 info.nativeFormats = 0;
2908 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2909 info.nativeFormats |= RTAUDIO_SINT16;
2910 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2911 info.nativeFormats |= RTAUDIO_SINT32;
2912 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2913 info.nativeFormats |= RTAUDIO_FLOAT32;
2914 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2915 info.nativeFormats |= RTAUDIO_FLOAT64;
2916 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2917 info.nativeFormats |= RTAUDIO_SINT24;
2919 if ( info.outputChannels > 0 )
2920 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2921 if ( info.inputChannels > 0 )
2922 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Release the driver so other devices can be probed afterwards.
2925 drivers.removeCurrentDriver();
2929 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2931 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2932 object->callbackEvent( index );
2935 void RtApiAsio :: saveDeviceInfo( void )
2939 unsigned int nDevices = getDeviceCount();
2940 devices_.resize( nDevices );
2941 for ( unsigned int i=0; i<nDevices; i++ )
2942 devices_[i] = getDeviceInfo( i );
2945 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2946 unsigned int firstChannel, unsigned int sampleRate,
2947 RtAudioFormat format, unsigned int *bufferSize,
2948 RtAudio::StreamOptions *options )
2949 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2951 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2953 // For ASIO, a duplex stream MUST use the same driver.
2954 if ( isDuplexInput && stream_.device[0] != device ) {
2955 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2959 char driverName[32];
2960 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2961 if ( result != ASE_OK ) {
2962 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2963 errorText_ = errorStream_.str();
2967 // Only load the driver once for duplex stream.
2968 if ( !isDuplexInput ) {
2969 // The getDeviceInfo() function will not work when a stream is open
2970 // because ASIO does not allow multiple devices to run at the same
2971 // time. Thus, we'll probe the system before opening a stream and
2972 // save the results for use by getDeviceInfo().
2973 this->saveDeviceInfo();
2975 if ( !drivers.loadDriver( driverName ) ) {
2976 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2977 errorText_ = errorStream_.str();
2981 result = ASIOInit( &driverInfo );
2982 if ( result != ASE_OK ) {
2983 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2984 errorText_ = errorStream_.str();
2989 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2990 bool buffersAllocated = false;
2991 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2992 unsigned int nChannels;
2995 // Check the device channel count.
2996 long inputChannels, outputChannels;
2997 result = ASIOGetChannels( &inputChannels, &outputChannels );
2998 if ( result != ASE_OK ) {
2999 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3000 errorText_ = errorStream_.str();
3004 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3005 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3006 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3007 errorText_ = errorStream_.str();
3010 stream_.nDeviceChannels[mode] = channels;
3011 stream_.nUserChannels[mode] = channels;
3012 stream_.channelOffset[mode] = firstChannel;
3014 // Verify the sample rate is supported.
3015 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3016 if ( result != ASE_OK ) {
3017 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3018 errorText_ = errorStream_.str();
3022 // Get the current sample rate
3023 ASIOSampleRate currentRate;
3024 result = ASIOGetSampleRate( ¤tRate );
3025 if ( result != ASE_OK ) {
3026 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3027 errorText_ = errorStream_.str();
3031 // Set the sample rate only if necessary
3032 if ( currentRate != sampleRate ) {
3033 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3034 if ( result != ASE_OK ) {
3035 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3036 errorText_ = errorStream_.str();
3041 // Determine the driver data type.
3042 ASIOChannelInfo channelInfo;
3043 channelInfo.channel = 0;
3044 if ( mode == OUTPUT ) channelInfo.isInput = false;
3045 else channelInfo.isInput = true;
3046 result = ASIOGetChannelInfo( &channelInfo );
3047 if ( result != ASE_OK ) {
3048 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3049 errorText_ = errorStream_.str();
3053 // Assuming WINDOWS host is always little-endian.
3054 stream_.doByteSwap[mode] = false;
3055 stream_.userFormat = format;
3056 stream_.deviceFormat[mode] = 0;
3057 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3058 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3059 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3061 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3062 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3063 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3065 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3066 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3067 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3069 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3070 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3071 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3073 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3074 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3075 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3078 if ( stream_.deviceFormat[mode] == 0 ) {
3079 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3080 errorText_ = errorStream_.str();
3084 // Set the buffer size. For a duplex stream, this will end up
3085 // setting the buffer size based on the input constraints, which
3087 long minSize, maxSize, preferSize, granularity;
3088 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3089 if ( result != ASE_OK ) {
3090 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3091 errorText_ = errorStream_.str();
3095 if ( isDuplexInput ) {
3096 // When this is the duplex input (output was opened before), then we have to use the same
3097 // buffersize as the output, because it might use the preferred buffer size, which most
3098 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3099 // So instead of throwing an error, make them equal. The caller uses the reference
3100 // to the "bufferSize" param as usual to set up processing buffers.
3102 *bufferSize = stream_.bufferSize;
3105 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3106 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3107 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3108 else if ( granularity == -1 ) {
3109 // Make sure bufferSize is a power of two.
3110 int log2_of_min_size = 0;
3111 int log2_of_max_size = 0;
3113 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3114 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3115 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3118 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3119 int min_delta_num = log2_of_min_size;
3121 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3122 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3123 if (current_delta < min_delta) {
3124 min_delta = current_delta;
3129 *bufferSize = ( (unsigned int)1 << min_delta_num );
3130 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3131 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3133 else if ( granularity != 0 ) {
3134 // Set to an even multiple of granularity, rounding up.
3135 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3140 // we don't use it anymore, see above!
3141 // Just left it here for the case...
3142 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3143 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3148 stream_.bufferSize = *bufferSize;
3149 stream_.nBuffers = 2;
3151 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3152 else stream_.userInterleaved = true;
3154 // ASIO always uses non-interleaved buffers.
3155 stream_.deviceInterleaved[mode] = false;
3157 // Allocate, if necessary, our AsioHandle structure for the stream.
3158 if ( handle == 0 ) {
3160 handle = new AsioHandle;
3162 catch ( std::bad_alloc& ) {
3163 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3166 handle->bufferInfos = 0;
3168 // Create a manual-reset event.
3169 handle->condition = CreateEvent( NULL, // no security
3170 TRUE, // manual-reset
3171 FALSE, // non-signaled initially
3173 stream_.apiHandle = (void *) handle;
3176 // Create the ASIO internal buffers. Since RtAudio sets up input
3177 // and output separately, we'll have to dispose of previously
3178 // created output buffers for a duplex stream.
3179 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3180 ASIODisposeBuffers();
3181 if ( handle->bufferInfos ) free( handle->bufferInfos );
3184 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3186 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3187 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3188 if ( handle->bufferInfos == NULL ) {
3189 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3190 errorText_ = errorStream_.str();
3194 ASIOBufferInfo *infos;
3195 infos = handle->bufferInfos;
3196 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3197 infos->isInput = ASIOFalse;
3198 infos->channelNum = i + stream_.channelOffset[0];
3199 infos->buffers[0] = infos->buffers[1] = 0;
3201 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3202 infos->isInput = ASIOTrue;
3203 infos->channelNum = i + stream_.channelOffset[1];
3204 infos->buffers[0] = infos->buffers[1] = 0;
3207 // prepare for callbacks
3208 stream_.sampleRate = sampleRate;
3209 stream_.device[mode] = device;
3210 stream_.mode = isDuplexInput ? DUPLEX : mode;
3212 // store this class instance before registering callbacks, that are going to use it
3213 asioCallbackInfo = &stream_.callbackInfo;
3214 stream_.callbackInfo.object = (void *) this;
3216 // Set up the ASIO callback structure and create the ASIO data buffers.
3217 asioCallbacks.bufferSwitch = &bufferSwitch;
3218 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3219 asioCallbacks.asioMessage = &asioMessages;
3220 asioCallbacks.bufferSwitchTimeInfo = NULL;
3221 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3222 if ( result != ASE_OK ) {
3223 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3224 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3225 // In that case, let's be naïve and try that instead.
3226 *bufferSize = preferSize;
3227 stream_.bufferSize = *bufferSize;
3228 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3231 if ( result != ASE_OK ) {
3232 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3233 errorText_ = errorStream_.str();
3236 buffersAllocated = true;
3237 stream_.state = STREAM_STOPPED;
3239 // Set flags for buffer conversion.
3240 stream_.doConvertBuffer[mode] = false;
3241 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3242 stream_.doConvertBuffer[mode] = true;
3243 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3244 stream_.nUserChannels[mode] > 1 )
3245 stream_.doConvertBuffer[mode] = true;
3247 // Allocate necessary internal buffers
3248 unsigned long bufferBytes;
3249 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3250 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3251 if ( stream_.userBuffer[mode] == NULL ) {
3252 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3256 if ( stream_.doConvertBuffer[mode] ) {
3258 bool makeBuffer = true;
3259 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3260 if ( isDuplexInput && stream_.deviceBuffer ) {
3261 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3262 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3266 bufferBytes *= *bufferSize;
3267 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3268 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3269 if ( stream_.deviceBuffer == NULL ) {
3270 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3276 // Determine device latencies
3277 long inputLatency, outputLatency;
3278 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3279 if ( result != ASE_OK ) {
3280 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3281 errorText_ = errorStream_.str();
3282 error( RtAudioError::WARNING); // warn but don't fail
3285 stream_.latency[0] = outputLatency;
3286 stream_.latency[1] = inputLatency;
3289 // Setup the buffer conversion information structure. We don't use
3290 // buffers to do channel offsets, so we override that parameter
3292 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3297 if ( !isDuplexInput ) {
3298 // the cleanup for error in the duplex input, is done by RtApi::openStream
3299 // So we clean up for single channel only
3301 if ( buffersAllocated )
3302 ASIODisposeBuffers();
3304 drivers.removeCurrentDriver();
3307 CloseHandle( handle->condition );
3308 if ( handle->bufferInfos )
3309 free( handle->bufferInfos );
3312 stream_.apiHandle = 0;
3316 if ( stream_.userBuffer[mode] ) {
3317 free( stream_.userBuffer[mode] );
3318 stream_.userBuffer[mode] = 0;
3321 if ( stream_.deviceBuffer ) {
3322 free( stream_.deviceBuffer );
3323 stream_.deviceBuffer = 0;
3328 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open stream: dispose ASIO buffers, unload the driver, free
// the AsioHandle and all conversion buffers, and mark the stream closed.
// Calling this with no open stream only raises a WARNING.
3330 void RtApiAsio :: closeStream()
3332 if ( stream_.state == STREAM_CLOSED ) {
3333 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3334 error( RtAudioError::WARNING );
// A running stream is marked stopped before tearing down driver state.
3338 if ( stream_.state == STREAM_RUNNING ) {
3339 stream_.state = STREAM_STOPPED;
3342 ASIODisposeBuffers();
3343 drivers.removeCurrentDriver();
// Release the per-stream ASIO handle (event + channel buffer infos).
3345 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3347 CloseHandle( handle->condition );
3348 if ( handle->bufferInfos )
3349 free( handle->bufferInfos );
3351 stream_.apiHandle = 0;
// Free the user-side buffers for both directions (0 = output, 1 = input).
3354 for ( int i=0; i<2; i++ ) {
3355 if ( stream_.userBuffer[i] ) {
3356 free( stream_.userBuffer[i] );
3357 stream_.userBuffer[i] = 0;
3361 if ( stream_.deviceBuffer ) {
3362 free( stream_.deviceBuffer );
3363 stream_.deviceBuffer = 0;
3366 stream_.mode = UNINITIALIZED;
3367 stream_.state = STREAM_CLOSED;
// File-scope flag cleared by startStream(); presumably set when a stop is
// initiated so that stopStream() is not re-entered from the callback
// path — NOTE(review): its setter is outside this excerpt, confirm usage.
3370 bool stopThreadCalled = false;
// Start the opened stream via ASIOStart().  Resets the drain bookkeeping
// and the manual-reset stop event before marking the stream RUNNING.
// Starting an already-running stream raises only a WARNING; an ASIO
// failure raises SYSTEM_ERROR.
3372 void RtApiAsio :: startStream()
3375 if ( stream_.state == STREAM_RUNNING ) {
3376 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3377 error( RtAudioError::WARNING );
3381 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3382 ASIOError result = ASIOStart();
3383 if ( result != ASE_OK ) {
3384 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3385 errorText_ = errorStream_.str();
// Fresh stream: no drain in progress, stop event non-signaled.
3389 handle->drainCounter = 0;
3390 handle->internalDrain = false;
3391 ResetEvent( handle->condition );
3392 stream_.state = STREAM_RUNNING;
3396 stopThreadCalled = false;
3398 if ( result == ASE_OK ) return;
3399 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after draining pending output.  For output/duplex
// streams the calling thread blocks on the handle's event until the
// callback signals that the device buffers have been flushed, then
// ASIOStop() is invoked.  Stopping a stopped stream raises a WARNING;
// an ASIO failure raises SYSTEM_ERROR.
3402 void RtApiAsio :: stopStream()
3405 if ( stream_.state == STREAM_STOPPED ) {
3406 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3407 error( RtAudioError::WARNING );
3411 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain was requested yet: request one (2)
// and wait for callbackEvent() to signal completion.
3413 if ( handle->drainCounter == 0 ) {
3414 handle->drainCounter = 2;
3415 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3419 stream_.state = STREAM_STOPPED;
3421 ASIOError result = ASIOStop();
3422 if ( result != ASE_OK ) {
3423 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3424 errorText_ = errorStream_.str();
3427 if ( result == ASE_OK ) return;
3428 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream.  Historically this skipped the output drain, but the
// commented-out lines below were disabled because residual sound was
// observed; abort now behaves exactly like stopStream().
3431 void RtApiAsio :: abortStream()
3434 if ( stream_.state == STREAM_STOPPED ) {
3435 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3436 error( RtAudioError::WARNING );
3440 // The following lines were commented-out because some behavior was
3441 // noted where the device buffers need to be zeroed to avoid
3442 // continuing sound, even when the device buffers are completely
3443 // disposed. So now, calling abort is the same as calling stop.
3444 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3445 // handle->drainCounter = 2;
3449 // This function will be called by a spawned thread when the user
3450 // callback function signals that the stream should be stopped or
3451 // aborted. It is necessary to handle it this way because the
3452 // callbackEvent() function must return before the ASIOStop()
3453 // function will return.
// Thread entry point (see _beginthreadex in callbackEvent); 'ptr' is the
// stream's CallbackInfo, whose 'object' is the owning RtApiAsio.
3454 static unsigned __stdcall asioStopStream( void *ptr )
3456 CallbackInfo *info = (CallbackInfo *) ptr;
3457 RtApiAsio *object = (RtApiAsio *) info->object;
3459 object->stopStream();
// Per-buffer processing, invoked from the driver's bufferSwitch callback.
// 'bufferIndex' selects which half of the ASIO double buffer to service.
// Runs the user callback, converts/byte-swaps between user and device
// layouts, and manages the drain/stop handshake with stopStream().
3464 bool RtApiAsio :: callbackEvent( long bufferIndex )
3466 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3467 if ( stream_.state == STREAM_CLOSED ) {
3468 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3469 error( RtAudioError::WARNING );
3473 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3474 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3476 // Check if we were draining the stream and signal if finished.
// drainCounter > 3 means the zero-filled output buffers have cycled
// through the device; either wake the thread blocked in stopStream()
// or (internal drain) spawn a thread to call stopStream() for us.
3477 if ( handle->drainCounter > 3 ) {
3479 stream_.state = STREAM_STOPPING;
3480 if ( handle->internalDrain == false )
3481 SetEvent( handle->condition );
3482 else { // spawn a thread to stop the stream
3484 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3485 &stream_.callbackInfo, 0, &threadId );
3490 // Invoke user callback to get fresh output data UNLESS we are
// draining (drainCounter != 0), in which case output is zero-filled.
3492 if ( handle->drainCounter == 0 ) {
3493 RtAudioCallback callback = (RtAudioCallback) info->callback;
3494 double streamTime = getStreamTime();
3495 RtAudioStreamStatus status = 0;
// Report any over/underflow flagged by the driver since the last call.
3496 if ( stream_.mode != INPUT && asioXRun == true ) {
3497 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3500 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3501 status |= RTAUDIO_INPUT_OVERFLOW;
3504 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3505 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = stop after draining output.
3506 if ( cbReturnValue == 2 ) {
3507 stream_.state = STREAM_STOPPING;
3508 handle->drainCounter = 2;
3510 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3511 &stream_.callbackInfo, 0, &threadId );
3514 else if ( cbReturnValue == 1 ) {
3515 handle->drainCounter = 1;
3516 handle->internalDrain = true;
3520 unsigned int nChannels, bufferBytes, i, j;
3521 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
// --- Output side: move user data into the driver's per-channel buffers.
3522 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3524 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3526 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3528 for ( i=0, j=0; i<nChannels; i++ ) {
3529 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3530 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// Conversion path: de-interleave/convert into deviceBuffer first.
3534 else if ( stream_.doConvertBuffer[0] ) {
3536 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3537 if ( stream_.doByteSwap[0] )
3538 byteSwapBuffer( stream_.deviceBuffer,
3539 stream_.bufferSize * stream_.nDeviceChannels[0],
3540 stream_.deviceFormat[0] );
3542 for ( i=0, j=0; i<nChannels; i++ ) {
3543 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3544 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3545 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// Direct path: user buffer already matches the device layout.
3551 if ( stream_.doByteSwap[0] )
3552 byteSwapBuffer( stream_.userBuffer[0],
3553 stream_.bufferSize * stream_.nUserChannels[0],
3554 stream_.userFormat );
3556 for ( i=0, j=0; i<nChannels; i++ ) {
3557 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3558 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3559 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3565 // Don't bother draining input
3566 if ( handle->drainCounter ) {
3567 handle->drainCounter++;
// --- Input side: move the driver's per-channel data to the user buffer.
3571 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3573 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3575 if (stream_.doConvertBuffer[1]) {
3577 // Always interleave ASIO input data.
3578 for ( i=0, j=0; i<nChannels; i++ ) {
3579 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3580 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3581 handle->bufferInfos[i].buffers[bufferIndex],
3585 if ( stream_.doByteSwap[1] )
3586 byteSwapBuffer( stream_.deviceBuffer,
3587 stream_.bufferSize * stream_.nDeviceChannels[1],
3588 stream_.deviceFormat[1] );
3589 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3593 for ( i=0, j=0; i<nChannels; i++ ) {
3594 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3595 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3596 handle->bufferInfos[i].buffers[bufferIndex],
3601 if ( stream_.doByteSwap[1] )
3602 byteSwapBuffer( stream_.userBuffer[1],
3603 stream_.bufferSize * stream_.nUserChannels[1],
3604 stream_.userFormat );
3609 // The following call was suggested by Malte Clasen. While the API
3610 // documentation indicates it should not be required, some device
3611 // drivers apparently do not function correctly without it.
3614 RtApi::tickStreamTime();
// ASIO driver callback: the driver reports a sample-rate change.  The
// stream is stopped (errors from stopStream are reported to stderr) and
// the user is notified via stderr.
3618 static void sampleRateChanged( ASIOSampleRate sRate )
3620 // The ASIO documentation says that this usually only happens during
3621 // external sync. Audio processing is not stopped by the driver,
3622 // actual sample rate might not have even changed, maybe only the
3623 // sample rate status of an AES/EBU or S/PDIF digital input at the
// audio device changed.
3626 RtApi *object = (RtApi *) asioCallbackInfo->object;
3628 object->stopStream();
3630 catch ( RtAudioError &exception ) {
3631 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3635 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver message callback: answers capability queries and reacts to
// driver notifications (reset, resync, latency change, etc.).  Returns a
// selector-specific value (generally 1 = handled/supported).
3638 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3642 switch( selector ) {
3643 case kAsioSelectorSupported:
3644 if ( value == kAsioResetRequest
3645 || value == kAsioEngineVersion
3646 || value == kAsioResyncRequest
3647 || value == kAsioLatenciesChanged
3648 // The following three were added for ASIO 2.0, you don't
3649 // necessarily have to support them.
3650 || value == kAsioSupportsTimeInfo
3651 || value == kAsioSupportsTimeCode
3652 || value == kAsioSupportsInputMonitor)
3655 case kAsioResetRequest:
3656 // Defer the task and perform the reset of the driver during the
3657 // next "safe" situation.  You cannot reset the driver right now,
3658 // as this code is called from the driver.  Resetting the driver
3659 // is done by completely destructing it, i.e. ASIOStop(),
3660 // ASIODisposeBuffers(), destruction; afterwards you re-initialize
// the driver.
3662 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3665 case kAsioResyncRequest:
3666 // This informs the application that the driver encountered some
3667 // non-fatal data loss.  It is used for synchronization purposes
3668 // of different media.  Added mainly to work around the Win16Mutex
3669 // problems in Windows 95/98 with the Windows Multimedia system,
3670 // which could lose data because the Mutex was held too long by
3671 // another thread.  However a driver can issue it in other
// situations, too.
3673 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3677 case kAsioLatenciesChanged:
3678 // This will inform the host application that the driver's
3679 // latencies have changed.  Beware, this does not mean that the
3680 // buffer sizes have changed!  You might need to update internal
// delay data.
3682 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3685 case kAsioEngineVersion:
3686 // Return the supported ASIO version of the host application.  If
3687 // a host application does not implement this selector, ASIO 1.0
3688 // is assumed by the driver.
3691 case kAsioSupportsTimeInfo:
3692 // Informs the driver whether the
3693 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3694 // For compatibility with ASIO 1.0 drivers the host application
3695 // should always support the "old" bufferSwitch method, too.
3698 case kAsioSupportsTimeCode:
3699 // Informs the driver whether application is interested in time
3700 // code info.  If an application does not need to know about time
3701 // code, the driver has less work to do.
3708 static const char* getAsioErrorString( ASIOError result )
3716 static const Messages m[] =
3718 { ASE_NotPresent, "Hardware input or output is not present or available." },
3719 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3720 { ASE_InvalidParameter, "Invalid input parameter." },
3721 { ASE_InvalidMode, "Invalid mode." },
3722 { ASE_SPNotAdvancing, "Sample position not advancing." },
3723 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3724 { ASE_NoMemory, "Not enough memory to complete the request." }
3727 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3728 if ( m[i].value == result ) return m[i].message;
3730 return "Unknown error.";
3733 //******************** End of __WINDOWS_ASIO__ *********************//
3737 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3739 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3740 // - Introduces support for the Windows WASAPI API
3741 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3742 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3743 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3750 #include <mferror.h>
3752 #include <mftransform.h>
3753 #include <wmcodecdsp.h>
3755 #include <audioclient.h>
3757 #include <mmdeviceapi.h>
3758 #include <functiondiscoverykeys_devpkey.h>
3760 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3761 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3764 #ifndef MFSTARTUP_NOSOCKET
3765 #define MFSTARTUP_NOSOCKET 0x1
3769 #pragma comment( lib, "ksuser" )
3770 #pragma comment( lib, "mfplat.lib" )
3771 #pragma comment( lib, "mfuuid.lib" )
3772 #pragma comment( lib, "wmcodecdspuuid" )
3775 //=============================================================================
3777 #define SAFE_RELEASE( objectPtr )\
3780 objectPtr->Release();\
3784 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3786 //-----------------------------------------------------------------------------
3788 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3789 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3790 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3791 // provide intermediate storage for read / write synchronization.
3805 // sets the length of the internal ring buffer
// Allocate a zero-initialized ring buffer of 'bufferSize' elements of
// 'formatBytes' bytes each and record the element count.
// NOTE(review): calloc's return value is not checked here — allocation
// failure would leave buffer_ NULL.
3806 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3809 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3811 bufferSize_ = bufferSize;
3816 // attempt to push a buffer into the ring buffer at the current "in" index
// Returns true on success, false if the request is invalid or there is not
// enough free space between the write ("in") and read ("out") positions.
// bufferSize is in samples of `format`; the ring is indexed in samples.
3817 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3819 if ( !buffer || // incoming buffer is NULL
3820 bufferSize == 0 || // incoming buffer has no data
3821 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index into the same linear coordinate space as the write
// span [inIndex_, inIndexEnd) so a single interval test detects overlap.
3826 unsigned int relOutIndex = outIndex_;
3827 unsigned int inIndexEnd = inIndex_ + bufferSize;
3828 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3829 relOutIndex += bufferSize_;
3832 // "in" index can end on the "out" index but cannot begin at it
3833 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3834 return false; // not enough space between "in" index and "out" index
3837 // copy buffer from external to internal
// Split the write into two spans when it wraps: fromInSize samples go at the
// current "in" index, fromZeroSize samples wrap around to the ring's start.
3838 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3839 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3840 int fromInSize = bufferSize - fromZeroSize;
// Per-format copy: cast the byte buffer to the sample type so indexing and
// sizeof() operate in whole samples for the (possibly split) memcpy pair.
3845 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3846 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3848 case RTAUDIO_SINT16:
3849 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3850 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3852 case RTAUDIO_SINT24:
3853 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3854 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3856 case RTAUDIO_SINT32:
3857 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3858 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3860 case RTAUDIO_FLOAT32:
3861 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3862 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3864 case RTAUDIO_FLOAT64:
3865 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3866 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3870 // update "in" index
// Advance the write position and wrap it back into [0, bufferSize_).
3871 inIndex_ += bufferSize;
3872 inIndex_ %= bufferSize_;
3877 // attempt to pull a buffer from the ring buffer from the current "out" index
// Returns true on success, false if the request is invalid or fewer than
// bufferSize samples are available between the read and write positions.
// Mirror image of pushBuffer: reads from the ring into the caller's buffer.
3878 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3880 if ( !buffer || // incoming buffer is NULL
3881 bufferSize == 0 || // incoming buffer has no data
3882 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index into the same linear space as the read span
// [outIndex_, outIndexEnd) so a single interval test detects under-run.
3887 unsigned int relInIndex = inIndex_;
3888 unsigned int outIndexEnd = outIndex_ + bufferSize;
3889 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3890 relInIndex += bufferSize_;
3893 // "out" index can begin at and end on the "in" index
3894 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3895 return false; // not enough space between "out" index and "in" index
3898 // copy buffer from internal to external
// Split the read into two spans when it wraps: fromOutSize samples from the
// current "out" index, fromZeroSize samples continuing at the ring's start.
3899 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3900 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3901 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copy: cast to the sample type so indexing and sizeof() operate
// in whole samples for the (possibly split) memcpy pair.
3906 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3907 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3909 case RTAUDIO_SINT16:
3910 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3911 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3913 case RTAUDIO_SINT24:
3914 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3915 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3917 case RTAUDIO_SINT32:
3918 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3919 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3921 case RTAUDIO_FLOAT32:
3922 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3923 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3925 case RTAUDIO_FLOAT64:
3926 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3927 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3931 // update "out" index
// Advance the read position and wrap it back into [0, bufferSize_).
3932 outIndex_ += bufferSize;
3933 outIndex_ %= bufferSize_;
// ring buffer capacity, in samples of the stream format
3940 unsigned int bufferSize_;
// next write position ("in" index) into buffer_, in samples
3941 unsigned int inIndex_;
// next read position ("out" index) from buffer_, in samples
3942 unsigned int outIndex_;
3945 //-----------------------------------------------------------------------------
3947 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3948 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3949 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3950 class WasapiResampler
// Constructs a Media Foundation resampler MFT converting audio between
// inSampleRate and outSampleRate. isFloat selects MFAudioFormat_Float vs
// MFAudioFormat_PCM; bitsPerSample and channelCount describe one frame.
// NOTE(review): HRESULTs from MFStartup/CoCreateInstance/SetInputType etc.
// are not checked here — failures surface later in Convert(); confirm this
// is the intended error policy.
3953 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3954 unsigned int inSampleRate, unsigned int outSampleRate )
3955 : _bytesPerSample( bitsPerSample / 8 )
3956 , _channelCount( channelCount )
3957 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3958 , _transformUnk( NULL )
3959 , _transform( NULL )
3960 , _mediaType( NULL )
3961 , _inputMediaType( NULL )
3962 , _outputMediaType( NULL )
3964 #ifdef __IWMResamplerProps_FWD_DEFINED__
3965 , _resamplerProps( NULL )
3968 // 1. Initialization
3970 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3972 // 2. Create Resampler Transform Object
3974 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
3975 IID_IUnknown, ( void** ) &_transformUnk );
3977 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
3979 #ifdef __IWMResamplerProps_FWD_DEFINED__
3980 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
3981 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
3984 // 3. Specify input / output format
// Build one template media type, then copy it into distinct input and
// output types; only the sample-rate-dependent fields differ on output.
3986 MFCreateMediaType( &_mediaType );
3987 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
3988 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
3989 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
3990 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
3991 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
3992 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
3993 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
3994 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
3996 MFCreateMediaType( &_inputMediaType );
3997 _mediaType->CopyAllItems( _inputMediaType );
3999 _transform->SetInputType( 0, _inputMediaType, 0 );
4001 MFCreateMediaType( &_outputMediaType );
4002 _mediaType->CopyAllItems( _outputMediaType );
// Output differs from input only in sample rate (and the derived byte rate).
4004 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4005 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4007 _transform->SetOutputType( 0, _outputMediaType, 0 );
4009 // 4. Send stream start messages to Resampler
4011 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4012 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4013 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
4018 // 8. Send stream stop messages to Resampler
4020 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4021 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
// Release all COM interfaces acquired in the constructor.
4027 SAFE_RELEASE( _transformUnk );
4028 SAFE_RELEASE( _transform );
4029 SAFE_RELEASE( _mediaType );
4030 SAFE_RELEASE( _inputMediaType );
4031 SAFE_RELEASE( _outputMediaType );
4033 #ifdef __IWMResamplerProps_FWD_DEFINED__
4034 SAFE_RELEASE( _resamplerProps );
// Converts inSampleCount frames from inBuffer into outBuffer, writing the
// number of frames actually produced into outSampleCount. Both buffers use
// the format/channel layout fixed at construction.
4038 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4040 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4041 if ( _sampleRatio == 1 )
4043 // no sample rate conversion required
4044 memcpy( outBuffer, inBuffer, inputBufferSize );
4045 outSampleCount = inSampleCount;
// Worst-case output size: scaled input plus one extra frame of headroom
// to absorb rounding in the rate conversion.
4049 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4051 IMFMediaBuffer* rInBuffer;
4052 IMFSample* rInSample;
4053 BYTE* rInByteBuffer = NULL;
4055 // 5. Create Sample object from input data
4057 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4059 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4060 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4061 rInBuffer->Unlock();
4062 rInByteBuffer = NULL;
4064 rInBuffer->SetCurrentLength( inputBufferSize );
4066 MFCreateSample( &rInSample );
4067 rInSample->AddBuffer( rInBuffer );
4069 // 6. Pass input data to Resampler
4071 _transform->ProcessInput( 0, rInSample, 0 );
// The sample holds its own reference to the buffer; drop ours now.
4073 SAFE_RELEASE( rInBuffer );
4074 SAFE_RELEASE( rInSample );
4076 // 7. Perform sample rate conversion
4078 IMFMediaBuffer* rOutBuffer = NULL;
4079 BYTE* rOutByteBuffer = NULL;
4081 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4083 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4085 // 7.1 Create Sample object for output data
4087 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4088 MFCreateSample( &( rOutDataBuffer.pSample ) );
4089 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4090 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4091 rOutDataBuffer.dwStreamID = 0;
4092 rOutDataBuffer.dwStatus = 0;
4093 rOutDataBuffer.pEvents = NULL;
4095 // 7.2 Get output data from Resampler
// The MFT may buffer input internally and produce nothing this call;
// release the output objects and bail out in that case.
4097 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4100 SAFE_RELEASE( rOutBuffer );
4101 SAFE_RELEASE( rOutDataBuffer.pSample );
4105 // 7.3 Write output data to outBuffer
// Re-acquire the sample's data as one contiguous buffer, then query how
// many bytes the resampler actually produced.
4107 SAFE_RELEASE( rOutBuffer );
4108 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4109 rOutBuffer->GetCurrentLength( &rBytes );
4111 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4112 memcpy( outBuffer, rOutByteBuffer, rBytes );
4113 rOutBuffer->Unlock();
4114 rOutByteBuffer = NULL;
// Report produced frames: bytes / bytes-per-sample / channels.
4116 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4117 SAFE_RELEASE( rOutBuffer );
4118 SAFE_RELEASE( rOutDataBuffer.pSample );
// Format geometry fixed at construction.
4122 unsigned int _bytesPerSample;
4123 unsigned int _channelCount;
// COM interfaces owned for the resampler's lifetime (released in dtor).
4126 IUnknown* _transformUnk;
4127 IMFTransform* _transform;
4128 IMFMediaType* _mediaType;
4129 IMFMediaType* _inputMediaType;
4130 IMFMediaType* _outputMediaType;
4132 #ifdef __IWMResamplerProps_FWD_DEFINED__
4133 IWMResamplerProps* _resamplerProps;
4137 //-----------------------------------------------------------------------------
4139 // A structure to hold various information related to the WASAPI implementation.
// WASAPI interfaces and event handles for the (optional) capture and render
// sides of a stream; all members start NULL and are filled in as each side
// is opened.
4142 IAudioClient* captureAudioClient;
4143 IAudioClient* renderAudioClient;
4144 IAudioCaptureClient* captureClient;
4145 IAudioRenderClient* renderClient;
4146 HANDLE captureEvent;
4150 : captureAudioClient( NULL ),
4151 renderAudioClient( NULL ),
4152 captureClient( NULL ),
4153 renderClient( NULL ),
4154 captureEvent( NULL ),
4155 renderEvent( NULL ) {}
4158 //=============================================================================
// Initializes COM (if not already initialized on this thread) and creates
// the MMDevice enumerator used by all subsequent device queries.
4160 RtApiWasapi::RtApiWasapi()
4161 : coInitialized_( false ), deviceEnumerator_( NULL )
4163 // WASAPI can run either apartment or multi-threaded
// Record whether *this object* initialized COM so the destructor only
// calls CoUninitialize() when it owns that initialization.
4164 HRESULT hr = CoInitialize( NULL );
4165 if ( !FAILED( hr ) )
4166 coInitialized_ = true;
4168 // Instantiate device enumerator
4169 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4170 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4171 ( void** ) &deviceEnumerator_ );
4173 // If this runs on an old Windows, it will fail. Ignore and proceed.
// Leave the enumerator NULL; later calls (e.g. getDeviceCount) detect this.
4175 deviceEnumerator_ = NULL;
4178 //-----------------------------------------------------------------------------
// Shuts down any open stream, releases the device enumerator, and balances
// the constructor's CoInitialize() when applicable.
4180 RtApiWasapi::~RtApiWasapi()
4182 if ( stream_.state != STREAM_CLOSED )
4185 SAFE_RELEASE( deviceEnumerator_ );
4187 // If this object previously called CoInitialize()
4188 if ( coInitialized_ )
4192 //=============================================================================
// Returns the total number of active WASAPI endpoints: capture devices plus
// render devices. On any enumeration failure, errorText_ is set and the
// error path reports RtAudioError::DRIVER_ERROR instead of a count.
4194 unsigned int RtApiWasapi::getDeviceCount( void )
4196 unsigned int captureDeviceCount = 0;
4197 unsigned int renderDeviceCount = 0;
4199 IMMDeviceCollection* captureDevices = NULL;
4200 IMMDeviceCollection* renderDevices = NULL;
// Constructor may have failed to create the enumerator (old Windows).
4202 if ( !deviceEnumerator_ )
4205 // Count capture devices
4207 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4208 if ( FAILED( hr ) ) {
4209 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4213 hr = captureDevices->GetCount( &captureDeviceCount );
4214 if ( FAILED( hr ) ) {
4215 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4219 // Count render devices
4220 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4221 if ( FAILED( hr ) ) {
4222 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4226 hr = renderDevices->GetCount( &renderDeviceCount );
4227 if ( FAILED( hr ) ) {
4228 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4233 // release all references
4234 SAFE_RELEASE( captureDevices );
4235 SAFE_RELEASE( renderDevices );
4237 if ( errorText_.empty() )
4238 return captureDeviceCount + renderDeviceCount;
4240 error( RtAudioError::DRIVER_ERROR );
4244 //-----------------------------------------------------------------------------
// Probes a single WASAPI endpoint and returns its RtAudio::DeviceInfo.
// Device indexing convention: render devices occupy [0, renderDeviceCount),
// capture devices follow at [renderDeviceCount, renderDeviceCount +
// captureDeviceCount) — see the branch on `device >= renderDeviceCount`.
// All COM objects acquired here are released in the common cleanup section.
4246 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4248 RtAudio::DeviceInfo info;
4249 unsigned int captureDeviceCount = 0;
4250 unsigned int renderDeviceCount = 0;
4251 std::string defaultDeviceName;
4252 bool isCaptureDevice = false;
4254 PROPVARIANT deviceNameProp;
4255 PROPVARIANT defaultDeviceNameProp;
4257 IMMDeviceCollection* captureDevices = NULL;
4258 IMMDeviceCollection* renderDevices = NULL;
4259 IMMDevice* devicePtr = NULL;
4260 IMMDevice* defaultDevicePtr = NULL;
4261 IAudioClient* audioClient = NULL;
4262 IPropertyStore* devicePropStore = NULL;
4263 IPropertyStore* defaultDevicePropStore = NULL;
4265 WAVEFORMATEX* deviceFormat = NULL;
4266 WAVEFORMATEX* closestMatchFormat = NULL;
// Pessimistically mark the probe failed; only a fully successful probe
// path leaves info in a usable state.
4269 info.probed = false;
4271 // Count capture devices
4273 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4274 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4275 if ( FAILED( hr ) ) {
4276 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4280 hr = captureDevices->GetCount( &captureDeviceCount );
4281 if ( FAILED( hr ) ) {
4282 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4286 // Count render devices
4287 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4288 if ( FAILED( hr ) ) {
4289 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4293 hr = renderDevices->GetCount( &renderDeviceCount );
4294 if ( FAILED( hr ) ) {
4295 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4299 // validate device index
4300 if ( device >= captureDeviceCount + renderDeviceCount ) {
4301 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4302 errorType = RtAudioError::INVALID_USE;
4306 // determine whether index falls within capture or render devices
4307 if ( device >= renderDeviceCount ) {
// Capture index is offset by the number of render devices.
4308 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4309 if ( FAILED( hr ) ) {
4310 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4313 isCaptureDevice = true;
4316 hr = renderDevices->Item( device, &devicePtr );
4317 if ( FAILED( hr ) ) {
4318 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4321 isCaptureDevice = false;
4324 // get default device name
4325 if ( isCaptureDevice ) {
4326 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4327 if ( FAILED( hr ) ) {
4328 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4333 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4334 if ( FAILED( hr ) ) {
4335 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4340 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4341 if ( FAILED( hr ) ) {
4342 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
// PROPVARIANTs must be initialized before GetValue and cleared at exit.
4345 PropVariantInit( &defaultDeviceNameProp );
4347 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4348 if ( FAILED( hr ) ) {
4349 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4353 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4356 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4357 if ( FAILED( hr ) ) {
4358 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4362 PropVariantInit( &deviceNameProp );
4364 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4365 if ( FAILED( hr ) ) {
4366 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4370 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// Default-device status is decided by comparing friendly names.
4373 if ( isCaptureDevice ) {
4374 info.isDefaultInput = info.name == defaultDeviceName;
4375 info.isDefaultOutput = false;
4378 info.isDefaultInput = false;
4379 info.isDefaultOutput = info.name == defaultDeviceName;
// Activate an IAudioClient only to query the shared-mode mix format.
4383 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4384 if ( FAILED( hr ) ) {
4385 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4389 hr = audioClient->GetMixFormat( &deviceFormat );
4390 if ( FAILED( hr ) ) {
4391 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4395 if ( isCaptureDevice ) {
4396 info.inputChannels = deviceFormat->nChannels;
4397 info.outputChannels = 0;
4398 info.duplexChannels = 0;
4401 info.inputChannels = 0;
4402 info.outputChannels = deviceFormat->nChannels;
4403 info.duplexChannels = 0;
4407 info.sampleRates.clear();
4409 // allow support for all sample rates as we have a built-in sample rate converter
4410 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4411 info.sampleRates.push_back( SAMPLE_RATES[i] );
4413 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4416 info.nativeFormats = 0;
// Map the mix format (plain tag or WAVE_FORMAT_EXTENSIBLE SubFormat) to
// the corresponding RTAUDIO_* native-format flag by bit depth.
4418 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4419 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4420 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4422 if ( deviceFormat->wBitsPerSample == 32 ) {
4423 info.nativeFormats |= RTAUDIO_FLOAT32;
4425 else if ( deviceFormat->wBitsPerSample == 64 ) {
4426 info.nativeFormats |= RTAUDIO_FLOAT64;
4429 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4430 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4431 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4433 if ( deviceFormat->wBitsPerSample == 8 ) {
4434 info.nativeFormats |= RTAUDIO_SINT8;
4436 else if ( deviceFormat->wBitsPerSample == 16 ) {
4437 info.nativeFormats |= RTAUDIO_SINT16;
4439 else if ( deviceFormat->wBitsPerSample == 24 ) {
4440 info.nativeFormats |= RTAUDIO_SINT24;
4442 else if ( deviceFormat->wBitsPerSample == 32 ) {
4443 info.nativeFormats |= RTAUDIO_SINT32;
4451 // release all references
4452 PropVariantClear( &deviceNameProp );
4453 PropVariantClear( &defaultDeviceNameProp );
4455 SAFE_RELEASE( captureDevices );
4456 SAFE_RELEASE( renderDevices );
4457 SAFE_RELEASE( devicePtr );
4458 SAFE_RELEASE( defaultDevicePtr );
4459 SAFE_RELEASE( audioClient );
4460 SAFE_RELEASE( devicePropStore );
4461 SAFE_RELEASE( defaultDevicePropStore );
// GetMixFormat output is CoTaskMemAlloc'd by WASAPI; free accordingly.
4463 CoTaskMemFree( deviceFormat );
4464 CoTaskMemFree( closestMatchFormat );
4466 if ( !errorText_.empty() )
4473 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4475 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4476 if ( getDeviceInfo( i ).isDefaultOutput ) {
4484 //-----------------------------------------------------------------------------
4486 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4488 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4489 if ( getDeviceInfo( i ).isDefaultInput ) {
4497 //-----------------------------------------------------------------------------
// Releases all WASAPI resources for the open stream: COM clients, event
// handles, the WasapiHandle itself, and the user/device buffers, then marks
// the stream closed. Warns (non-fatal) if no stream is open.
4499 void RtApiWasapi::closeStream( void )
4501 if ( stream_.state == STREAM_CLOSED ) {
4502 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4503 error( RtAudioError::WARNING );
// Stop the stream first if it is still running.
4507 if ( stream_.state != STREAM_STOPPED )
4510 // clean up stream memory
4511 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4512 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4514 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4515 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are Win32 objects, not COM: close rather than Release.
4517 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4518 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4520 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4521 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4523 delete ( WasapiHandle* ) stream_.apiHandle;
4524 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0/1 = output/input).
4526 for ( int i = 0; i < 2; i++ ) {
4527 if ( stream_.userBuffer[i] ) {
4528 free( stream_.userBuffer[i] );
4529 stream_.userBuffer[i] = 0;
4533 if ( stream_.deviceBuffer ) {
4534 free( stream_.deviceBuffer );
4535 stream_.deviceBuffer = 0;
4538 // update stream state
4539 stream_.state = STREAM_CLOSED;
4542 //-----------------------------------------------------------------------------
// Starts the stream by spawning the WASAPI worker thread (runWasapiThread),
// which performs the actual audio I/O loop. Warns if already running.
4544 void RtApiWasapi::startStream( void )
4548 if ( stream_.state == STREAM_RUNNING ) {
4549 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4550 error( RtAudioError::WARNING );
4554 // update stream state
// State is set before the thread launches so the worker sees RUNNING.
4555 stream_.state = STREAM_RUNNING;
4557 // create WASAPI stream thread
// Created suspended so priority can be applied before the thread runs.
4558 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4560 if ( !stream_.callbackInfo.thread ) {
4561 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4562 error( RtAudioError::THREAD_ERROR );
4565 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4566 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4570 //-----------------------------------------------------------------------------
// Gracefully stops a running stream: signals the worker thread via the
// STREAM_STOPPING state, waits for it to finish, lets the final buffer
// drain, then stops the WASAPI clients and closes the thread handle.
4572 void RtApiWasapi::stopStream( void )
4576 if ( stream_.state == STREAM_STOPPED ) {
4577 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4578 error( RtAudioError::WARNING );
4582 // inform stream thread by setting stream state to STREAM_STOPPING
4583 stream_.state = STREAM_STOPPING;
4585 // wait until stream thread is stopped
// Busy-wait on the state flag; the worker thread sets STREAM_STOPPED.
4586 while( stream_.state != STREAM_STOPPED ) {
4590 // Wait for the last buffer to play before stopping.
// Sleep one buffer period (ms) so queued audio is not cut off — this is
// what distinguishes stopStream from abortStream.
4591 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4593 // stop capture client if applicable
4594 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4595 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4596 if ( FAILED( hr ) ) {
4597 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4598 error( RtAudioError::DRIVER_ERROR );
4603 // stop render client if applicable
4604 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4605 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4606 if ( FAILED( hr ) ) {
4607 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4608 error( RtAudioError::DRIVER_ERROR );
4613 // close thread handle
4614 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4615 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4616 error( RtAudioError::THREAD_ERROR );
4620 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4623 //-----------------------------------------------------------------------------
// Immediately stops a running stream. Identical to stopStream() except it
// does NOT sleep to let the last buffer drain — pending audio is dropped.
4625 void RtApiWasapi::abortStream( void )
4629 if ( stream_.state == STREAM_STOPPED ) {
4630 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4631 error( RtAudioError::WARNING );
4635 // inform stream thread by setting stream state to STREAM_STOPPING
4636 stream_.state = STREAM_STOPPING;
4638 // wait until stream thread is stopped
// Busy-wait on the state flag; the worker thread sets STREAM_STOPPED.
4639 while ( stream_.state != STREAM_STOPPED ) {
4643 // stop capture client if applicable
4644 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4645 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4646 if ( FAILED( hr ) ) {
4647 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4648 error( RtAudioError::DRIVER_ERROR );
4653 // stop render client if applicable
4654 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4655 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4656 if ( FAILED( hr ) ) {
4657 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4658 error( RtAudioError::DRIVER_ERROR );
4663 // close thread handle
4664 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4665 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4666 error( RtAudioError::THREAD_ERROR );
4670 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4673 //-----------------------------------------------------------------------------
// Opens one direction (INPUT or OUTPUT) of a stream on the given device and
// fills the relevant stream_ fields. Returns SUCCESS/FAILURE.
// Device indexing: render devices occupy [0, renderDeviceCount), capture
// devices follow — consistent with getDeviceInfo(). Opening a *render*
// device for INPUT configures WASAPI loopback capture of that endpoint.
4675 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4676 unsigned int firstChannel, unsigned int sampleRate,
4677 RtAudioFormat format, unsigned int* bufferSize,
4678 RtAudio::StreamOptions* options )
4680 bool methodResult = FAILURE;
4681 unsigned int captureDeviceCount = 0;
4682 unsigned int renderDeviceCount = 0;
4684 IMMDeviceCollection* captureDevices = NULL;
4685 IMMDeviceCollection* renderDevices = NULL;
4686 IMMDevice* devicePtr = NULL;
4687 WAVEFORMATEX* deviceFormat = NULL;
4688 unsigned int bufferBytes;
4689 stream_.state = STREAM_STOPPED;
4691 // create API Handle if not already created
// One WasapiHandle is shared by both directions of a duplex stream.
4692 if ( !stream_.apiHandle )
4693 stream_.apiHandle = ( void* ) new WasapiHandle();
4695 // Count capture devices
4697 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4698 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4699 if ( FAILED( hr ) ) {
4700 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4704 hr = captureDevices->GetCount( &captureDeviceCount );
4705 if ( FAILED( hr ) ) {
4706 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4710 // Count render devices
4711 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4712 if ( FAILED( hr ) ) {
4713 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4717 hr = renderDevices->GetCount( &renderDeviceCount );
4718 if ( FAILED( hr ) ) {
4719 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4723 // validate device index
4724 if ( device >= captureDeviceCount + renderDeviceCount ) {
4725 errorType = RtAudioError::INVALID_USE;
4726 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4730 // if device index falls within capture devices
4731 if ( device >= renderDeviceCount ) {
// A true capture endpoint can only be opened for INPUT.
4732 if ( mode != INPUT ) {
4733 errorType = RtAudioError::INVALID_USE;
4734 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4738 // retrieve captureAudioClient from devicePtr
4739 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4741 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4742 if ( FAILED( hr ) ) {
4743 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4747 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4748 NULL, ( void** ) &captureAudioClient );
4749 if ( FAILED( hr ) ) {
4750 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4754 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4755 if ( FAILED( hr ) ) {
4756 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4760 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4761 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4764 // if device index falls within render devices and is configured for loopback
4765 if ( device < renderDeviceCount && mode == INPUT )
4767 // if renderAudioClient is not initialised, initialise it now
// Loopback needs the render side open too; recurse once as OUTPUT.
4768 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4769 if ( !renderAudioClient )
4771 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4774 // retrieve captureAudioClient from devicePtr
// Note: the *capture* client is activated on the render endpoint here —
// that is what makes this a loopback capture of the output stream.
4775 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4777 hr = renderDevices->Item( device, &devicePtr );
4778 if ( FAILED( hr ) ) {
4779 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4783 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4784 NULL, ( void** ) &captureAudioClient );
4785 if ( FAILED( hr ) ) {
4786 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4790 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4791 if ( FAILED( hr ) ) {
4792 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4796 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4797 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4800 // if device index falls within render devices and is configured for output
4801 if ( device < renderDeviceCount && mode == OUTPUT )
4803 // if renderAudioClient is already initialised, don't initialise it again
4804 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4805 if ( renderAudioClient )
4807 methodResult = SUCCESS;
4811 hr = renderDevices->Item( device, &devicePtr );
4812 if ( FAILED( hr ) ) {
4813 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4817 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4818 NULL, ( void** ) &renderAudioClient );
4819 if ( FAILED( hr ) ) {
4820 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4824 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4825 if ( FAILED( hr ) ) {
4826 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4830 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4831 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Opening the second direction of an existing stream promotes it to DUPLEX.
4835 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4836 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4837 stream_.mode = DUPLEX;
4840 stream_.mode = mode;
// Record stream parameters for this direction.
4843 stream_.device[mode] = device;
4844 stream_.doByteSwap[mode] = false;
4845 stream_.sampleRate = sampleRate;
4846 stream_.bufferSize = *bufferSize;
4847 stream_.nBuffers = 1;
4848 stream_.nUserChannels[mode] = channels;
4849 stream_.channelOffset[mode] = firstChannel;
4850 stream_.userFormat = format;
4851 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4853 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4854 stream_.userInterleaved = false;
4856 stream_.userInterleaved = true;
4857 stream_.deviceInterleaved[mode] = true;
4859 // Set flags for buffer conversion.
// Conversion is needed when user and device disagree on format, channel
// count, or (for multi-channel) interleaving.
4860 stream_.doConvertBuffer[mode] = false;
4861 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4862 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4863 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4864 stream_.doConvertBuffer[mode] = true;
4865 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4866 stream_.nUserChannels[mode] > 1 )
4867 stream_.doConvertBuffer[mode] = true;
4869 if ( stream_.doConvertBuffer[mode] )
4870 setConvertInfo( mode, 0 );
4872 // Allocate necessary internal buffers
4873 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4875 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4876 if ( !stream_.userBuffer[mode] ) {
4877 errorType = RtAudioError::MEMORY_ERROR;
4878 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4882 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4883 stream_.callbackInfo.priority = 15;
4885 stream_.callbackInfo.priority = 0;
4887 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4888 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4890 methodResult = SUCCESS;
// Common cleanup: release enumeration objects and the CoTaskMemAlloc'd
// mix format, regardless of success or failure.
4894 SAFE_RELEASE( captureDevices );
4895 SAFE_RELEASE( renderDevices );
4896 SAFE_RELEASE( devicePtr );
4897 CoTaskMemFree( deviceFormat );
4899 // if method failed, close the stream
4900 if ( methodResult == FAILURE )
4903 if ( !errorText_.empty() )
4905 return methodResult;
4908 //=============================================================================
// Thread entry point for the WASAPI stream-processing thread.
// wasapiPtr is the RtApiWasapi instance that created the thread; this
// simply forwards into its member processing loop, wasapiThread().
// NOTE(review): the enclosing braces/return of this function are elided
// in this extract.
4910 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4913   ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Thread entry point used to stop a running stream from inside the
// processing thread: spawned by wasapiThread() when the user callback
// returns 1, so that stopStream() is not called on the audio thread itself.
4918 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4921   ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Thread entry point used to abort a running stream: spawned by
// wasapiThread() when the user callback returns 2.  Mirrors
// stopWasapiThread() but calls abortStream() instead.
4926 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4929   ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4934 //-----------------------------------------------------------------------------
// Core WASAPI stream-processing loop, run on its own thread (started via
// runWasapiThread).  Per iteration it: pulls captured audio from the device
// into an intermediate ring buffer, resamples device rate -> user rate,
// invokes the user callback, resamples user rate -> device rate, and pushes
// the result to the render device, until stream_.state becomes
// STREAM_STOPPING.  Errors are reported through errorText/errorType.
// NOTE(review): many lines of the original function (error-handling gotos,
// an Exit label, braces, some call arguments) are elided in this extract,
// so the control flow shown here is partial.
4936 void RtApiWasapi::wasapiThread()
4938   // as this is a new thread, we must CoInitialize it
4939   CoInitialize( NULL );
// Fetch the per-stream COM interfaces and events stashed in the WasapiHandle
// by probeDeviceOpen(); any of these may be NULL if that direction is unused.
4943   IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4944   IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4945   IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4946   IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4947   HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4948   HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4950   WAVEFORMATEX* captureFormat = NULL;
4951   WAVEFORMATEX* renderFormat = NULL;
4952   float captureSrRatio = 0.0f;
4953   float renderSrRatio = 0.0f;
4954   WasapiBuffer captureBuffer;
4955   WasapiBuffer renderBuffer;
4956   WasapiResampler* captureResampler = NULL;
4957   WasapiResampler* renderResampler = NULL;
4959   // declare local stream variables
4960   RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4961   BYTE* streamBuffer = NULL;
4962   unsigned long captureFlags = 0;
4963   unsigned int bufferFrameCount = 0;
4964   unsigned int numFramesPadding = 0;
4965   unsigned int convBufferSize = 0;
// Loopback capture is signalled by input and output using the same device id.
4966   bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4967   bool callbackPushed = true;
4968   bool callbackPulled = false;
4969   bool callbackStopped = false;
4970   int callbackResult = 0;
4972   // convBuffer is used to store converted buffers between WASAPI and the user
4973   char* convBuffer = NULL;
4974   unsigned int convBuffSize = 0;
4975   unsigned int deviceBuffSize = 0;
4977   std::string errorText;
4978   RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4980   // Attempt to assign "Pro Audio" characteristic to thread
// Loaded dynamically so the build does not hard-link against avrt.lib.
4981   HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4983     DWORD taskIndex = 0;
4984     TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4985     AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4986     FreeLibrary( AvrtDll );
4989   // start capture stream if applicable
4990   if ( captureAudioClient ) {
4991     hr = captureAudioClient->GetMixFormat( &captureFormat );
4992     if ( FAILED( hr ) ) {
4993       errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4997     // init captureResampler
// Converts from the device's shared-mode mix rate to the user's requested
// sample rate; float flag depends on the negotiated device format.
4998     captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4999 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
5000 captureFormat->nSamplesPerSec, stream_.sampleRate );
5002 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
5004 if ( !captureClient ) {
// In loopback mode the capture client taps the render endpoint, so the
// event-callback flag is replaced by the loopback flag.
5005 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5006 loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5011 if ( FAILED( hr ) ) {
5012 errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5016 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5017 ( void** ) &captureClient );
5018 if ( FAILED( hr ) ) {
5019 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5023 // don't configure captureEvent if in loopback mode
5024 if ( !loopbackEnabled )
5026 // configure captureEvent to trigger on every available capture buffer
5027 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5028 if ( !captureEvent ) {
5029 errorType = RtAudioError::SYSTEM_ERROR;
5030 errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5034 hr = captureAudioClient->SetEventHandle( captureEvent );
5035 if ( FAILED( hr ) ) {
5036 errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Persist the created client/event back into the stream handle so
// stopStream()/closeStream() can reuse and release them.
5040 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5043 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5046 unsigned int inBufferSize = 0;
5047 hr = captureAudioClient->GetBufferSize( &inBufferSize );
5048 if ( FAILED( hr ) ) {
5049 errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5053 // scale outBufferSize according to stream->user sample rate ratio
5054 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5055 inBufferSize *= stream_.nDeviceChannels[INPUT];
5057 // set captureBuffer size
5058 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5060 // reset the capture stream
5061 hr = captureAudioClient->Reset();
5062 if ( FAILED( hr ) ) {
5063 errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5067 // start the capture stream
5068 hr = captureAudioClient->Start();
5069 if ( FAILED( hr ) ) {
5070 errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5075 // start render stream if applicable
// Mirrors the capture setup above, with the resampler direction reversed
// (user sample rate -> device mix rate).
5076 if ( renderAudioClient ) {
5077 hr = renderAudioClient->GetMixFormat( &renderFormat );
5078 if ( FAILED( hr ) ) {
5079 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5083 // init renderResampler
5084 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5085 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5086 stream_.sampleRate, renderFormat->nSamplesPerSec );
5088 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5090 if ( !renderClient ) {
5091 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5092 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5097 if ( FAILED( hr ) ) {
5098 errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5102 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5103 ( void** ) &renderClient );
5104 if ( FAILED( hr ) ) {
5105 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5109 // configure renderEvent to trigger on every available render buffer
5110 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5111 if ( !renderEvent ) {
5112 errorType = RtAudioError::SYSTEM_ERROR;
5113 errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5117 hr = renderAudioClient->SetEventHandle( renderEvent );
5118 if ( FAILED( hr ) ) {
5119 errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5123 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5124 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5127 unsigned int outBufferSize = 0;
5128 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5129 if ( FAILED( hr ) ) {
5130 errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5134 // scale inBufferSize according to user->stream sample rate ratio
5135 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5136 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5138 // set renderBuffer size
5139 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5141 // reset the render stream
5142 hr = renderAudioClient->Reset();
5143 if ( FAILED( hr ) ) {
5144 errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5148 // start the render stream
5149 hr = renderAudioClient->Start();
5150 if ( FAILED( hr ) ) {
5151 errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5156 // malloc buffer memory
// Buffer sizes are in bytes; for DUPLEX the larger of the two directions
// is allocated so a single scratch buffer serves both.
5157 if ( stream_.mode == INPUT )
5159 using namespace std; // for ceilf
5160 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5161 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5163 else if ( stream_.mode == OUTPUT )
5165 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5166 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5168 else if ( stream_.mode == DUPLEX )
5170 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5171 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5172 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5173 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5176 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5177 convBuffer = ( char* ) malloc( convBuffSize );
5178 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5179 if ( !convBuffer || !stream_.deviceBuffer ) {
5180 errorType = RtAudioError::MEMORY_ERROR;
5181 errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5185 // stream process loop
5186 while ( stream_.state != STREAM_STOPPING ) {
5187 if ( !callbackPulled ) {
5190 // 1. Pull callback buffer from inputBuffer
5191 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5192 //                          Convert callback buffer to user format
5194 if ( captureAudioClient )
// Resampling makes the frame counts unequal: pull the bulk of the frames
// first, then single frames until a full user buffer has accumulated.
5196 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5197 if ( captureSrRatio != 1 )
5199 // account for remainders
5204 while ( convBufferSize < stream_.bufferSize )
5206 // Pull callback buffer from inputBuffer
5207 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5208 samplesToPull * stream_.nDeviceChannels[INPUT],
5209 stream_.deviceFormat[INPUT] );
5211 if ( !callbackPulled )
5216 // Convert callback buffer to user sample rate
5217 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5218 unsigned int convSamples = 0;
5220 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5225 convBufferSize += convSamples;
5226 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5229 if ( callbackPulled )
5231 if ( stream_.doConvertBuffer[INPUT] ) {
5232 // Convert callback buffer to user format
5233 convertBuffer( stream_.userBuffer[INPUT],
5234 stream_.deviceBuffer,
5235 stream_.convertInfo[INPUT] );
5238 // no further conversion, simple copy deviceBuffer to userBuffer
5239 memcpy( stream_.userBuffer[INPUT],
5240 stream_.deviceBuffer,
5241 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5246 // if there is no capture stream, set callbackPulled flag
5247 callbackPulled = true;
5252 // 1. Execute user callback method
5253 // 2. Handle return value from callback
5255 // if callback has not requested the stream to stop
5256 if ( callbackPulled && !callbackStopped ) {
5257 // Execute user callback method
// Input overflow is reported to the callback when WASAPI flagged a data
// discontinuity on the last captured packet.
5258 callbackResult = callback( stream_.userBuffer[OUTPUT],
5259 stream_.userBuffer[INPUT],
5262 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5263 stream_.callbackInfo.userData );
5265 // Handle return value from callback
// 1 => drain then stop; 2 => abort immediately.  Either way the actual
// stop/abort is dispatched to a helper thread so this loop can keep
// feeding the device until the stream state changes.
5266 if ( callbackResult == 1 ) {
5267 // instantiate a thread to stop this thread
5268 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5269 if ( !threadHandle ) {
5270 errorType = RtAudioError::THREAD_ERROR;
5271 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5274 else if ( !CloseHandle( threadHandle ) ) {
5275 errorType = RtAudioError::THREAD_ERROR;
5276 errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5280 callbackStopped = true;
5282 else if ( callbackResult == 2 ) {
5283 // instantiate a thread to stop this thread
5284 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5285 if ( !threadHandle ) {
5286 errorType = RtAudioError::THREAD_ERROR;
5287 errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5290 else if ( !CloseHandle( threadHandle ) ) {
5291 errorType = RtAudioError::THREAD_ERROR;
5292 errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5296 callbackStopped = true;
5303 // 1. Convert callback buffer to stream format
5304 // 2. Convert callback buffer to stream sample rate and channel count
5305 // 3. Push callback buffer into outputBuffer
5307 if ( renderAudioClient && callbackPulled )
5309 // if the last call to renderBuffer.PushBuffer() was successful
5310 if ( callbackPushed || convBufferSize == 0 )
5312 if ( stream_.doConvertBuffer[OUTPUT] )
5314 // Convert callback buffer to stream format
5315 convertBuffer( stream_.deviceBuffer,
5316 stream_.userBuffer[OUTPUT],
5317 stream_.convertInfo[OUTPUT] );
5321 // no further conversion, simple copy userBuffer to deviceBuffer
5322 memcpy( stream_.deviceBuffer,
5323 stream_.userBuffer[OUTPUT],
5324 stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5327 // Convert callback buffer to stream sample rate
5328 renderResampler->Convert( convBuffer,
5329 stream_.deviceBuffer,
5334 // Push callback buffer into outputBuffer
5335 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5336 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5337 stream_.deviceFormat[OUTPUT] );
5340 // if there is no render stream, set callbackPushed flag
5341 callbackPushed = true;
5346 // 1. Get capture buffer from stream
5347 // 2. Push capture buffer into inputBuffer
5348 // 3. If 2. was successful: Release capture buffer
5350 if ( captureAudioClient ) {
5351 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
// In loopback mode the capture endpoint delivers no events of its own,
// so the render event is used for pacing instead.
5352 if ( !callbackPulled ) {
5353 WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5356 // Get capture buffer from stream
5357 hr = captureClient->GetBuffer( &streamBuffer,
5359 &captureFlags, NULL, NULL );
5360 if ( FAILED( hr ) ) {
5361 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5365 if ( bufferFrameCount != 0 ) {
5366 // Push capture buffer into inputBuffer
5367 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5368 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5369 stream_.deviceFormat[INPUT] ) )
5371 // Release capture buffer
5372 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5373 if ( FAILED( hr ) ) {
5374 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5380 // Inform WASAPI that capture was unsuccessful
5381 hr = captureClient->ReleaseBuffer( 0 );
5382 if ( FAILED( hr ) ) {
5383 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5390 // Inform WASAPI that capture was unsuccessful
5391 hr = captureClient->ReleaseBuffer( 0 );
5392 if ( FAILED( hr ) ) {
5393 errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5401 // 1. Get render buffer from stream
5402 // 2. Pull next buffer from outputBuffer
5403 // 3. If 2. was successful: Fill render buffer with next buffer
5404 //                          Release render buffer
5406 if ( renderAudioClient ) {
5407 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5408 if ( callbackPulled && !callbackPushed ) {
5409 WaitForSingleObject( renderEvent, INFINITE );
5412 // Get render buffer from stream
5413 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5414 if ( FAILED( hr ) ) {
5415 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5419 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5420 if ( FAILED( hr ) ) {
5421 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Only the unpadded (free) portion of the endpoint buffer may be written.
5425 bufferFrameCount -= numFramesPadding;
5427 if ( bufferFrameCount != 0 ) {
5428 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5429 if ( FAILED( hr ) ) {
5430 errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5434 // Pull next buffer from outputBuffer
5435 // Fill render buffer with next buffer
5436 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5437 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5438 stream_.deviceFormat[OUTPUT] ) )
5440 // Release render buffer
5441 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5442 if ( FAILED( hr ) ) {
5443 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5449 // Inform WASAPI that render was unsuccessful
5450 hr = renderClient->ReleaseBuffer( 0, 0 );
5451 if ( FAILED( hr ) ) {
5452 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5459 // Inform WASAPI that render was unsuccessful
5460 hr = renderClient->ReleaseBuffer( 0, 0 );
5461 if ( FAILED( hr ) ) {
5462 errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5468 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5469 if ( callbackPushed ) {
5470 // unsetting the callbackPulled flag lets the stream know that
5471 // the audio device is ready for another callback output buffer.
5472 callbackPulled = false;
5475 RtApi::tickStreamTime();
// Cleanup: release the COM-allocated mix formats, the conversion scratch
// buffer and both resamplers before marking the stream stopped.
5482 CoTaskMemFree( captureFormat );
5483 CoTaskMemFree( renderFormat );
5485 free ( convBuffer );
5486 delete renderResampler;
5487 delete captureResampler;
5491 // update stream state
5492 stream_.state = STREAM_STOPPED;
5494 if ( !errorText.empty() )
5496 errorText_ = errorText;
5501 //******************** End of __WINDOWS_WASAPI__ *********************//
5505 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5507 // Modified by Robin Davies, October 2005
5508 // - Improvements to DirectX pointer chasing.
5509 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5510 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5511 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5512 // Changed device query structure for RtAudio 4.0.7, January 2010
5514 #include <windows.h>
5515 #include <process.h>
5516 #include <mmsystem.h>
5520 #include <algorithm>
5522 #if defined(__MINGW32__)
5523 // missing from latest mingw winapi
5524 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5525 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5526 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5527 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5530 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5532 #ifdef _MSC_VER // if Microsoft Visual C++
5533 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5536 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5538 if ( pointer > bufferSize ) pointer -= bufferSize;
5539 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5540 if ( pointer < earlierPointer ) pointer += bufferSize;
5541 return pointer >= earlierPointer && pointer < laterPointer;
5544 // A structure to hold various information related to the DirectSound
5545 // API implementation.
5547 unsigned int drainCounter; // Tracks callback counts when draining
5548 bool internalDrain; // Indicates if stop is initiated from callback or not.
5552 UINT bufferPointer[2];
5553 DWORD dsBufferSize[2];
5554 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5558 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5561 // Declarations for utility functions, callbacks, and structures
5562 // specific to the DirectSound implementation.
5563 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5564 LPCTSTR description,
5568 static const char* getErrorString( int code );
5570 static unsigned __stdcall callbackHandler( void *ptr );
5579 : found(false) { validId[0] = false; validId[1] = false; }
5582 struct DsProbeData {
5584 std::vector<struct DsDevice>* dsDevices;
// Constructor: initializes COM for this thread so DirectSound objects can
// be created.  Remembers whether the CoInitialize call succeeded so the
// destructor knows whether a balancing CoUninitialize is required.
5587 RtApiDs :: RtApiDs()
5589   // Dsound will run both-threaded. If CoInitialize fails, then just
5590   // accept whatever the mainline chose for a threading model.
5591   coInitialized_ = false;
5592   HRESULT hr = CoInitialize( NULL );
5593   if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: closes any still-open stream, then releases COM only if this
// object's constructor was the one that initialized it.
5596 RtApiDs :: ~RtApiDs()
5598   if ( stream_.state != STREAM_CLOSED ) closeStream();
5599   if ( coInitialized_ ) CoUninitialize(); // balanced call.
5602 // The DirectSound default output is always the first device.
5603 unsigned int RtApiDs :: getDefaultOutputDevice( void )
// NOTE(review): the function body is elided in this extract; only the
// signature is visible here.
5608 // The DirectSound default input is always the first input device,
5609 // which is the first capture device enumerated.
5610 unsigned int RtApiDs :: getDefaultInputDevice( void )
// NOTE(review): the function body is elided in this extract; only the
// signature is visible here.
// Re-enumerates DirectSound output and capture devices, refreshing the
// cached dsDevices vector: devices not seen in this pass are pruned, and
// the surviving count is returned.  Enumeration errors are reported as
// warnings rather than failures.
5615 unsigned int RtApiDs :: getDeviceCount( void )
5617   // Set query flag for previously found devices to false, so that we
5618   // can check for any devices that have disappeared.
5619   for ( unsigned int i=0; i<dsDevices.size(); i++ )
5620     dsDevices[i].found = false;
5622   // Query DirectSound devices.
// deviceQueryCallback fills dsDevices (via probeInfo) and marks matches
// as found.
5623   struct DsProbeData probeInfo;
5624   probeInfo.isInput = false;
5625   probeInfo.dsDevices = &dsDevices;
5626   HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5627   if ( FAILED( result ) ) {
5628     errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5629     errorText_ = errorStream_.str();
5630     error( RtAudioError::WARNING );
5633   // Query DirectSoundCapture devices.
5634   probeInfo.isInput = true;
5635   result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5636   if ( FAILED( result ) ) {
5637     errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5638     errorText_ = errorStream_.str();
5639     error( RtAudioError::WARNING );
5642   // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note the loop deliberately omits the increment here: erase() shifts the
// next element into slot i (the increment on the keep path is elided from
// this extract).
5643   for ( unsigned int i=0; i<dsDevices.size(); ) {
5644     if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5648   return static_cast<unsigned int>(dsDevices.size());
// Probes DirectSound device `device` and fills an RtAudio::DeviceInfo with
// its output channels, input channels, supported sample rates and native
// formats.  Output capabilities are probed first; control then falls
// through (or jumps via `goto probeInput`) to the capture probe.  Probe
// failures are reported as warnings and return a partially filled struct.
// NOTE(review): several lines of the original (goto statements, labels,
// braces, returns) are elided in this extract, so control flow shown here
// is partial.
5651 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5653   RtAudio::DeviceInfo info;
5654   info.probed = false;
5656   if ( dsDevices.size() == 0 ) {
5657     // Force a query of all devices
5659     if ( dsDevices.size() == 0 ) {
5660       errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5661       error( RtAudioError::INVALID_USE );
5666   if ( device >= dsDevices.size() ) {
5667     errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5668     error( RtAudioError::INVALID_USE );
// Skip the output probe entirely if this device has no valid output id.
5673   if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5675   LPDIRECTSOUND output;
5677   result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5678   if ( FAILED( result ) ) {
5679     errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5680     errorText_ = errorStream_.str();
5681     error( RtAudioError::WARNING );
5685   outCaps.dwSize = sizeof( outCaps );
5686   result = output->GetCaps( &outCaps );
5687   if ( FAILED( result ) ) {
5689     errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5690     errorText_ = errorStream_.str();
5691     error( RtAudioError::WARNING );
5695   // Get output channel information.
5696   info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5698   // Get sample rate information.
// Keep every standard rate within the device's secondary-buffer range;
// prefer the highest supported rate not exceeding 48 kHz as the default.
5699   info.sampleRates.clear();
5700   for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5701     if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5702          SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5703       info.sampleRates.push_back( SAMPLE_RATES[k] );
5705       if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5706         info.preferredSampleRate = SAMPLE_RATES[k];
5710   // Get format information.
5711   if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5712   if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5716   if ( getDefaultOutputDevice() == device )
5717     info.isDefaultOutput = true;
// If there is no valid capture id, finish here with the output-only info.
5719   if ( dsDevices[ device ].validId[1] == false ) {
5720     info.name = dsDevices[ device ].name;
5727   LPDIRECTSOUNDCAPTURE input;
5728   result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5729   if ( FAILED( result ) ) {
5730     errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5731     errorText_ = errorStream_.str();
5732     error( RtAudioError::WARNING );
5737   inCaps.dwSize = sizeof( inCaps );
5738   result = input->GetCaps( &inCaps );
5739   if ( FAILED( result ) ) {
5741     errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5742     errorText_ = errorStream_.str();
5743     error( RtAudioError::WARNING );
5747   // Get input channel information.
5748   info.inputChannels = inCaps.dwChannels;
5750   // Get sample rate and format information.
// Decode the WAVE_FORMAT_* capability bitmask: the stereo (xSxx) flags are
// examined for >= 2 channel devices, the mono (xMxx) flags for 1 channel;
// 16-bit support is preferred over 8-bit when both are present.
5751   std::vector<unsigned int> rates;
5752   if ( inCaps.dwChannels >= 2 ) {
5753     if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5754     if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5755     if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5756     if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5757     if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5758     if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5759     if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5760     if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5762     if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5763       if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5764       if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5765       if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5766       if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5768     else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5769       if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5770       if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5771       if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5772       if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5775   else if ( inCaps.dwChannels == 1 ) {
5776     if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5777     if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5778     if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5779     if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5780     if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5781     if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5782     if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5783     if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5785     if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5786       if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5787       if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5788       if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5789       if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5791     else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5792       if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5793       if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5794       if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5795       if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5798   else info.inputChannels = 0; // technically, this would be an error
5802   if ( info.inputChannels == 0 ) return info;
5804   // Copy the supported rates to the info structure but avoid duplication.
5806   for ( unsigned int i=0; i<rates.size(); i++ ) {
5808     for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5809       if ( rates[i] == info.sampleRates[j] ) {
5814     if ( found == false ) info.sampleRates.push_back( rates[i] );
5816   std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5818   // If device opens for both playback and capture, we determine the channels.
5819   if ( info.outputChannels > 0 && info.inputChannels > 0 )
5820     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5822   if ( device == 0 ) info.isDefaultInput = true;
5824   // Copy name and return.
5825   info.name = dsDevices[ device ].name;
// Open DirectSound playback and/or capture for one device and populate the
// stream_ structure: validates device/mode, builds the WAVEFORMATEX, creates
// the DS (capture) buffers, allocates user/device conversion buffers, stores
// everything in a DsHandle, and spawns the callback thread on first open.
// Returns false on any failure path (callers receive errorText_).
// NOTE(review): this excerpt is a sampled dump of the original file — the
// "return FAILURE;" lines, closing braces and the error-cleanup label between
// many of the error messages below are elided; comments describe intent only.
5830 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5831 unsigned int firstChannel, unsigned int sampleRate,
5832 RtAudioFormat format, unsigned int *bufferSize,
5833 RtAudio::StreamOptions *options )
// DirectSound supports at most two channels per device (stereo).
5835 if ( channels + firstChannel > 2 ) {
5836 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
// Sanity-check the enumerated device list and the requested device index.
5840 size_t nDevices = dsDevices.size();
5841 if ( nDevices == 0 ) {
5842 // This should not happen because a check is made before this function is called.
5843 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5847 if ( device >= nDevices ) {
5848 // This should not happen because a check is made before this function is called.
5849 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// validId[0] == playback GUID valid, validId[1] == capture GUID valid.
5853 if ( mode == OUTPUT ) {
5854 if ( dsDevices[ device ].validId[0] == false ) {
5855 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5856 errorText_ = errorStream_.str();
5860 else { // mode == INPUT
5861 if ( dsDevices[ device ].validId[1] == false ) {
5862 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5863 errorText_ = errorStream_.str();
5868 // According to a note in PortAudio, using GetDesktopWindow()
5869 // instead of GetForegroundWindow() is supposed to avoid problems
5870 // that occur when the application's window is not the foreground
5871 // window. Also, if the application window closes before the
5872 // DirectSound buffer, DirectSound can crash. In the past, I had
5873 // problems when using GetDesktopWindow() but it seems fine now
5874 // (January 2010). I'll leave it commented here.
5875 // HWND hWnd = GetForegroundWindow();
5876 HWND hWnd = GetDesktopWindow();
5878 // Check the numberOfBuffers parameter and limit the lowest value to
5879 // two. This is a judgement call and a value of two is probably too
5880 // low for capture, but it should work for playback.
5882 if ( options ) nBuffers = options->numberOfBuffers;
5883 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5884 if ( nBuffers < 2 ) nBuffers = 3;
5886 // Check the lower range of the user-specified buffer size and set
5887 // (arbitrarily) to a lower bound of 32.
5888 if ( *bufferSize < 32 ) *bufferSize = 32;
5890 // Create the wave format structure. The data format setting will
5891 // be determined later.
5892 WAVEFORMATEX waveFormat;
5893 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5894 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5895 waveFormat.nChannels = channels + firstChannel;
5896 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5898 // Determine the device buffer size. By default, we'll use the value
5899 // defined above (32K), but we will grow it to make allowances for
5900 // very large software buffer sizes.
5901 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5902 DWORD dsPointerLeadTime = 0;
// ohandle = the DirectSound/DirectSoundCapture object, bhandle = its buffer.
5904 void *ohandle = 0, *bhandle = 0;
// ----- OUTPUT (playback) setup -----
5906 if ( mode == OUTPUT ) {
5908 LPDIRECTSOUND output;
5909 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5910 if ( FAILED( result ) ) {
5911 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5912 errorText_ = errorStream_.str();
5917 outCaps.dwSize = sizeof( outCaps );
5918 result = output->GetCaps( &outCaps );
5919 if ( FAILED( result ) ) {
5921 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5922 errorText_ = errorStream_.str();
5926 // Check channel information.
5927 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5928 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5929 errorText_ = errorStream_.str();
5933 // Check format information. Use 16-bit format unless not
5934 // supported or user requests 8-bit.
5935 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5936 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5937 waveFormat.wBitsPerSample = 16;
5938 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
// else branch (8-bit fallback) — the "else {" line is elided in this excerpt.
5941 waveFormat.wBitsPerSample = 8;
5942 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5944 stream_.userFormat = format;
5946 // Update wave format structure and buffer information.
5947 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5948 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time (bytes) we keep between our write cursor and DS's safe cursor.
5949 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5951 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5952 while ( dsPointerLeadTime * 2U > dsBufferSize )
5955 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5956 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5957 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5958 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5959 if ( FAILED( result ) ) {
5961 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5962 errorText_ = errorStream_.str();
5966 // Even though we will write to the secondary buffer, we need to
5967 // access the primary buffer to set the correct output format
5968 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5969 // buffer description.
5970 DSBUFFERDESC bufferDescription;
5971 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5972 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5973 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5975 // Obtain the primary buffer
5976 LPDIRECTSOUNDBUFFER buffer;
5977 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5978 if ( FAILED( result ) ) {
5980 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5981 errorText_ = errorStream_.str();
5985 // Set the primary DS buffer sound format.
5986 result = buffer->SetFormat( &waveFormat );
5987 if ( FAILED( result ) ) {
5989 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5990 errorText_ = errorStream_.str();
5994 // Setup the secondary DS buffer description.
5995 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5996 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5997 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5998 DSBCAPS_GLOBALFOCUS |
5999 DSBCAPS_GETCURRENTPOSITION2 |
6000 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
6001 bufferDescription.dwBufferBytes = dsBufferSize;
6002 bufferDescription.lpwfxFormat = &waveFormat;
6004 // Try to create the secondary DS buffer. If that doesn't work,
6005 // try to use software mixing. Otherwise, there's a problem.
6006 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6007 if ( FAILED( result ) ) {
6008 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6009 DSBCAPS_GLOBALFOCUS |
6010 DSBCAPS_GETCURRENTPOSITION2 |
6011 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6012 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6013 if ( FAILED( result ) ) {
6015 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6016 errorText_ = errorStream_.str();
6021 // Get the buffer size ... might be different from what we specified.
6023 dsbcaps.dwSize = sizeof( DSBCAPS );
6024 result = buffer->GetCaps( &dsbcaps );
6025 if ( FAILED( result ) ) {
6028 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6029 errorText_ = errorStream_.str();
6033 dsBufferSize = dsbcaps.dwBufferBytes;
6035 // Lock the DS buffer
// Lock the whole secondary buffer so we can zero it before playback starts.
6038 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6039 if ( FAILED( result ) ) {
6042 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6043 errorText_ = errorStream_.str();
6047 // Zero the DS buffer
6048 ZeroMemory( audioPtr, dataLen );
6050 // Unlock the DS buffer
6051 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6052 if ( FAILED( result ) ) {
6055 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6056 errorText_ = errorStream_.str();
// Stash the COM object and buffer pointers for the DsHandle below.
6060 ohandle = (void *) output;
6061 bhandle = (void *) buffer;
// ----- INPUT (capture) setup -----
6064 if ( mode == INPUT ) {
6066 LPDIRECTSOUNDCAPTURE input;
6067 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6068 if ( FAILED( result ) ) {
6069 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6070 errorText_ = errorStream_.str();
6075 inCaps.dwSize = sizeof( inCaps );
6076 result = input->GetCaps( &inCaps );
6077 if ( FAILED( result ) ) {
6079 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6080 errorText_ = errorStream_.str();
6084 // Check channel information.
6085 if ( inCaps.dwChannels < channels + firstChannel ) {
6086 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6090 // Check format information. Use 16-bit format unless user
// (remainder of this comment elided in excerpt — presumably "requests 8-bit".)
6092 DWORD deviceFormats;
// Stereo case: test the 8-bit stereo (xS08) capability bits.
6093 if ( channels + firstChannel == 2 ) {
6094 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6095 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6096 waveFormat.wBitsPerSample = 8;
6097 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6099 else { // assume 16-bit is supported
6100 waveFormat.wBitsPerSample = 16;
6101 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
// Mono case: test the 8-bit mono (xM08) capability bits.
6104 else { // channel == 1
6105 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6106 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6107 waveFormat.wBitsPerSample = 8;
6108 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6110 else { // assume 16-bit is supported
6111 waveFormat.wBitsPerSample = 16;
6112 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6115 stream_.userFormat = format;
6117 // Update wave format structure and buffer information.
6118 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6119 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6120 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6122 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6123 while ( dsPointerLeadTime * 2U > dsBufferSize )
6126 // Setup the secondary DS buffer description.
6127 DSCBUFFERDESC bufferDescription;
6128 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6129 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6130 bufferDescription.dwFlags = 0;
6131 bufferDescription.dwReserved = 0;
6132 bufferDescription.dwBufferBytes = dsBufferSize;
6133 bufferDescription.lpwfxFormat = &waveFormat;
6135 // Create the capture buffer.
6136 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6137 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6138 if ( FAILED( result ) ) {
6140 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6141 errorText_ = errorStream_.str();
6145 // Get the buffer size ... might be different from what we specified.
6147 dscbcaps.dwSize = sizeof( DSCBCAPS );
6148 result = buffer->GetCaps( &dscbcaps );
6149 if ( FAILED( result ) ) {
6152 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6153 errorText_ = errorStream_.str();
6157 dsBufferSize = dscbcaps.dwBufferBytes;
6159 // NOTE: We could have a problem here if this is a duplex stream
6160 // and the play and capture hardware buffer sizes are different
6161 // (I'm actually not sure if that is a problem or not).
6162 // Currently, we are not verifying that.
6164 // Lock the capture buffer
6167 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6168 if ( FAILED( result ) ) {
6171 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6172 errorText_ = errorStream_.str();
// Zero the freshly created capture buffer before use.
6177 ZeroMemory( audioPtr, dataLen );
6179 // Unlock the buffer
6180 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6181 if ( FAILED( result ) ) {
6184 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6185 errorText_ = errorStream_.str();
6189 ohandle = (void *) input;
6190 bhandle = (void *) buffer;
6193 // Set various stream parameters
6194 DsHandle *handle = 0;
6195 stream_.nDeviceChannels[mode] = channels + firstChannel;
6196 stream_.nUserChannels[mode] = channels;
6197 stream_.bufferSize = *bufferSize;
6198 stream_.channelOffset[mode] = firstChannel;
// DirectSound buffers are always interleaved at the device level.
6199 stream_.deviceInterleaved[mode] = true;
6200 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6201 else stream_.userInterleaved = true;
6203 // Set flag for buffer conversion
// A conversion pass is needed whenever user and device disagree on channel
// count, sample format, or (for multi-channel) interleaving.
6204 stream_.doConvertBuffer[mode] = false;
6205 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6206 stream_.doConvertBuffer[mode] = true;
6207 if (stream_.userFormat != stream_.deviceFormat[mode])
6208 stream_.doConvertBuffer[mode] = true;
6209 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6210 stream_.nUserChannels[mode] > 1 )
6211 stream_.doConvertBuffer[mode] = true;
6213 // Allocate necessary internal buffers
6214 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6215 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6216 if ( stream_.userBuffer[mode] == NULL ) {
6217 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6221 if ( stream_.doConvertBuffer[mode] ) {
6223 bool makeBuffer = true;
6224 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// In duplex, reuse the existing device buffer if the output side already
// allocated one at least as large as the input side needs.
6225 if ( mode == INPUT ) {
6226 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6227 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6228 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6233 bufferBytes *= *bufferSize;
6234 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6235 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6236 if ( stream_.deviceBuffer == NULL ) {
6237 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6243 // Allocate our DsHandle structures for the stream.
6244 if ( stream_.apiHandle == 0 ) {
6246 handle = new DsHandle;
6248 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but this is the DS backend —
// looks like a copy/paste slip in the original error text.
6249 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6253 // Create a manual-reset event.
// Used by the callback thread to signal stopStream() when draining is done.
6254 handle->condition = CreateEvent( NULL, // no security
6255 TRUE, // manual-reset
6256 FALSE, // non-signaled initially
6258 stream_.apiHandle = (void *) handle;
// Stream already open in the other direction — reuse the existing handle.
6261 handle = (DsHandle *) stream_.apiHandle;
6262 handle->id[mode] = ohandle;
6263 handle->buffer[mode] = bhandle;
6264 handle->dsBufferSize[mode] = dsBufferSize;
6265 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6267 stream_.device[mode] = device;
6268 stream_.state = STREAM_STOPPED;
6269 if ( stream_.mode == OUTPUT && mode == INPUT )
6270 // We had already set up an output stream.
6271 stream_.mode = DUPLEX;
6273 stream_.mode = mode;
6274 stream_.nBuffers = nBuffers;
6275 stream_.sampleRate = sampleRate;
6277 // Setup the buffer conversion information structure.
6278 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6280 // Setup the callback thread.
6281 if ( stream_.callbackInfo.isRunning == false ) {
6283 stream_.callbackInfo.isRunning = true;
6284 stream_.callbackInfo.object = (void *) this;
6285 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6286 &stream_.callbackInfo, 0, &threadId );
6287 if ( stream_.callbackInfo.thread == 0 ) {
6288 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6292 // Boost DS thread priority
6293 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// ----- error-cleanup section (the "error:" label itself is elided in this
// excerpt): release COM objects, close the event, free all buffers. -----
6299 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6300 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6301 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6302 if ( buffer ) buffer->Release();
6305 if ( handle->buffer[1] ) {
6306 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6307 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6308 if ( buffer ) buffer->Release();
6311 CloseHandle( handle->condition );
6313 stream_.apiHandle = 0;
6316 for ( int i=0; i<2; i++ ) {
6317 if ( stream_.userBuffer[i] ) {
6318 free( stream_.userBuffer[i] );
6319 stream_.userBuffer[i] = 0;
6323 if ( stream_.deviceBuffer ) {
6324 free( stream_.deviceBuffer );
6325 stream_.deviceBuffer = 0;
6328 stream_.state = STREAM_CLOSED;
// Close an open stream: stop and join the callback thread, release the
// DirectSound playback/capture objects and buffers, close the event handle,
// free the internal user/device buffers, and reset mode/state.
// NOTE(review): several lines (buffer->Stop()/Release()/object->Release()
// calls and closing braces) are elided in this excerpt of the file.
6332 void RtApiDs :: closeStream()
6334 if ( stream_.state == STREAM_CLOSED ) {
6335 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6336 error( RtAudioError::WARNING );
6340 // Stop the callback thread.
// Clearing isRunning makes the thread exit; then wait for and close it.
6341 stream_.callbackInfo.isRunning = false;
6342 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6343 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6345 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release playback-side COM objects (cleanup calls elided in excerpt).
6347 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6348 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6349 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release capture-side COM objects (cleanup calls elided in excerpt).
6356 if ( handle->buffer[1] ) {
6357 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6358 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6365 CloseHandle( handle->condition );
6367 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6370 for ( int i=0; i<2; i++ ) {
6371 if ( stream_.userBuffer[i] ) {
6372 free( stream_.userBuffer[i] );
6373 stream_.userBuffer[i] = 0;
6377 if ( stream_.deviceBuffer ) {
6378 free( stream_.deviceBuffer );
6379 stream_.deviceBuffer = 0;
6382 stream_.mode = UNINITIALIZED;
6383 stream_.state = STREAM_CLOSED;
// Start a stopped stream: raise timer resolution, reset the duplex preroll
// and rolling flags, start the DS playback and/or capture buffers in looping
// mode, reset the drain bookkeeping, and mark the stream RUNNING.
// NOTE(review): some lines (e.g. goto/unlock cleanup and closing braces)
// are elided in this excerpt of the file.
6386 void RtApiDs :: startStream()
6389 if ( stream_.state == STREAM_RUNNING ) {
6390 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6391 error( RtAudioError::WARNING );
6395 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6397 // Increase scheduler frequency on lesser windows (a side-effect of
6398 // increasing timer accuracy). On greater windows (Win2K or later),
6399 // this is already in effect.
// Paired with timeEndPeriod( 1 ) in stopStream().
6400 timeBeginPeriod( 1 );
6402 buffersRolling = false;
6403 duplexPrerollBytes = 0;
6405 if ( stream_.mode == DUPLEX ) {
6406 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6407 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start the playback buffer looping (output or duplex).
6411 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6413 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6414 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6415 if ( FAILED( result ) ) {
6416 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6417 errorText_ = errorStream_.str();
// Start the capture buffer looping (input or duplex).
6422 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6424 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6425 result = buffer->Start( DSCBSTART_LOOPING );
6426 if ( FAILED( result ) ) {
6427 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6428 errorText_ = errorStream_.str();
// Reset drain state and the condition event before declaring RUNNING.
6433 handle->drainCounter = 0;
6434 handle->internalDrain = false;
6435 ResetEvent( handle->condition );
6436 stream_.state = STREAM_RUNNING;
6439 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream: wait for the output to drain, stop the DS playback
// and/or capture buffers, zero them so a restart does not replay stale audio,
// rewind the internal buffer pointers, and restore the scheduler period.
// NOTE(review): some lines (variable declarations for audioPtr/dataLen,
// closing braces, goto/unlock cleanup) are elided in this excerpt.
6442 void RtApiDs :: stopStream()
6445 if ( stream_.state == STREAM_STOPPED ) {
6446 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6447 error( RtAudioError::WARNING );
6454 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6455 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Ask the callback to drain; it signals handle->condition when finished.
6456 if ( handle->drainCounter == 0 ) {
6457 handle->drainCounter = 2;
6458 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6461 stream_.state = STREAM_STOPPED;
6463 MUTEX_LOCK( &stream_.mutex );
6465 // Stop the buffer and clear memory
6466 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6467 result = buffer->Stop();
6468 if ( FAILED( result ) ) {
6469 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6470 errorText_ = errorStream_.str();
6474 // Lock the buffer and clear it so that if we start to play again,
6475 // we won't have old data playing.
6476 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6477 if ( FAILED( result ) ) {
6478 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6479 errorText_ = errorStream_.str();
6483 // Zero the DS buffer
6484 ZeroMemory( audioPtr, dataLen );
6486 // Unlock the DS buffer
6487 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6488 if ( FAILED( result ) ) {
6489 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6490 errorText_ = errorStream_.str();
6494 // If we start playing again, we must begin at beginning of buffer.
6495 handle->bufferPointer[0] = 0;
6498 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6499 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6503 stream_.state = STREAM_STOPPED;
// In DUPLEX the mutex was already locked in the output branch above.
6505 if ( stream_.mode != DUPLEX )
6506 MUTEX_LOCK( &stream_.mutex );
6508 result = buffer->Stop();
6509 if ( FAILED( result ) ) {
6510 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6511 errorText_ = errorStream_.str();
6515 // Lock the buffer and clear it so that if we start to play again,
6516 // we won't have old data playing.
6517 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6518 if ( FAILED( result ) ) {
6519 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6520 errorText_ = errorStream_.str();
6524 // Zero the DS buffer
6525 ZeroMemory( audioPtr, dataLen );
6527 // Unlock the DS buffer
6528 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6529 if ( FAILED( result ) ) {
6530 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6531 errorText_ = errorStream_.str();
6535 // If we start recording again, we must begin at beginning of buffer.
6536 handle->bufferPointer[1] = 0;
6540 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6541 MUTEX_UNLOCK( &stream_.mutex );
6543 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream immediately: setting drainCounter to 2 tells the
// callback to output silence rather than drain pending user audio. The
// subsequent stop call is elided in this excerpt of the file.
6546 void RtApiDs :: abortStream()
6549 if ( stream_.state == STREAM_STOPPED ) {
6550 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6551 error( RtAudioError::WARNING );
6555 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6556 handle->drainCounter = 2;
6561 void RtApiDs :: callbackEvent()
6563 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6564 Sleep( 50 ); // sleep 50 milliseconds
6568 if ( stream_.state == STREAM_CLOSED ) {
6569 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6570 error( RtAudioError::WARNING );
6574 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6575 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6577 // Check if we were draining the stream and signal is finished.
6578 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6580 stream_.state = STREAM_STOPPING;
6581 if ( handle->internalDrain == false )
6582 SetEvent( handle->condition );
6588 // Invoke user callback to get fresh output data UNLESS we are
6590 if ( handle->drainCounter == 0 ) {
6591 RtAudioCallback callback = (RtAudioCallback) info->callback;
6592 double streamTime = getStreamTime();
6593 RtAudioStreamStatus status = 0;
6594 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6595 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6596 handle->xrun[0] = false;
6598 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6599 status |= RTAUDIO_INPUT_OVERFLOW;
6600 handle->xrun[1] = false;
6602 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6603 stream_.bufferSize, streamTime, status, info->userData );
6604 if ( cbReturnValue == 2 ) {
6605 stream_.state = STREAM_STOPPING;
6606 handle->drainCounter = 2;
6610 else if ( cbReturnValue == 1 ) {
6611 handle->drainCounter = 1;
6612 handle->internalDrain = true;
6617 DWORD currentWritePointer, safeWritePointer;
6618 DWORD currentReadPointer, safeReadPointer;
6619 UINT nextWritePointer;
6621 LPVOID buffer1 = NULL;
6622 LPVOID buffer2 = NULL;
6623 DWORD bufferSize1 = 0;
6624 DWORD bufferSize2 = 0;
6629 MUTEX_LOCK( &stream_.mutex );
6630 if ( stream_.state == STREAM_STOPPED ) {
6631 MUTEX_UNLOCK( &stream_.mutex );
6635 if ( buffersRolling == false ) {
6636 if ( stream_.mode == DUPLEX ) {
6637 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6639 // It takes a while for the devices to get rolling. As a result,
6640 // there's no guarantee that the capture and write device pointers
6641 // will move in lockstep. Wait here for both devices to start
6642 // rolling, and then set our buffer pointers accordingly.
6643 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6644 // bytes later than the write buffer.
6646 // Stub: a serious risk of having a pre-emptive scheduling round
6647 // take place between the two GetCurrentPosition calls... but I'm
6648 // really not sure how to solve the problem. Temporarily boost to
6649 // Realtime priority, maybe; but I'm not sure what priority the
6650 // DirectSound service threads run at. We *should* be roughly
6651 // within a ms or so of correct.
6653 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6654 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6656 DWORD startSafeWritePointer, startSafeReadPointer;
6658 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6659 if ( FAILED( result ) ) {
6660 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6661 errorText_ = errorStream_.str();
6662 MUTEX_UNLOCK( &stream_.mutex );
6663 error( RtAudioError::SYSTEM_ERROR );
6666 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6667 if ( FAILED( result ) ) {
6668 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6669 errorText_ = errorStream_.str();
6670 MUTEX_UNLOCK( &stream_.mutex );
6671 error( RtAudioError::SYSTEM_ERROR );
6675 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6676 if ( FAILED( result ) ) {
6677 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6678 errorText_ = errorStream_.str();
6679 MUTEX_UNLOCK( &stream_.mutex );
6680 error( RtAudioError::SYSTEM_ERROR );
6683 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6684 if ( FAILED( result ) ) {
6685 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6686 errorText_ = errorStream_.str();
6687 MUTEX_UNLOCK( &stream_.mutex );
6688 error( RtAudioError::SYSTEM_ERROR );
6691 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6695 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6697 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6698 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6699 handle->bufferPointer[1] = safeReadPointer;
6701 else if ( stream_.mode == OUTPUT ) {
6703 // Set the proper nextWritePosition after initial startup.
6704 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6705 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6706 if ( FAILED( result ) ) {
6707 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6708 errorText_ = errorStream_.str();
6709 MUTEX_UNLOCK( &stream_.mutex );
6710 error( RtAudioError::SYSTEM_ERROR );
6713 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6714 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6717 buffersRolling = true;
6720 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6722 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6724 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6725 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6726 bufferBytes *= formatBytes( stream_.userFormat );
6727 memset( stream_.userBuffer[0], 0, bufferBytes );
6730 // Setup parameters and do buffer conversion if necessary.
6731 if ( stream_.doConvertBuffer[0] ) {
6732 buffer = stream_.deviceBuffer;
6733 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6734 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6735 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6738 buffer = stream_.userBuffer[0];
6739 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6740 bufferBytes *= formatBytes( stream_.userFormat );
6743 // No byte swapping necessary in DirectSound implementation.
6745 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6746 // unsigned. So, we need to convert our signed 8-bit data here to
6748 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6749 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6751 DWORD dsBufferSize = handle->dsBufferSize[0];
6752 nextWritePointer = handle->bufferPointer[0];
6754 DWORD endWrite, leadPointer;
6756 // Find out where the read and "safe write" pointers are.
6757 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6758 if ( FAILED( result ) ) {
6759 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6760 errorText_ = errorStream_.str();
6761 MUTEX_UNLOCK( &stream_.mutex );
6762 error( RtAudioError::SYSTEM_ERROR );
6766 // We will copy our output buffer into the region between
6767 // safeWritePointer and leadPointer. If leadPointer is not
6768 // beyond the next endWrite position, wait until it is.
6769 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6770 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6771 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6772 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6773 endWrite = nextWritePointer + bufferBytes;
6775 // Check whether the entire write region is behind the play pointer.
6776 if ( leadPointer >= endWrite ) break;
6778 // If we are here, then we must wait until the leadPointer advances
6779 // beyond the end of our next write region. We use the
6780 // Sleep() function to suspend operation until that happens.
6781 double millis = ( endWrite - leadPointer ) * 1000.0;
6782 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6783 if ( millis < 1.0 ) millis = 1.0;
6784 Sleep( (DWORD) millis );
6787 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6788 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6789 // We've strayed into the forbidden zone ... resync the read pointer.
6790 handle->xrun[0] = true;
6791 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6792 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6793 handle->bufferPointer[0] = nextWritePointer;
6794 endWrite = nextWritePointer + bufferBytes;
6797 // Lock free space in the buffer
6798 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6799 &bufferSize1, &buffer2, &bufferSize2, 0 );
6800 if ( FAILED( result ) ) {
6801 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6802 errorText_ = errorStream_.str();
6803 MUTEX_UNLOCK( &stream_.mutex );
6804 error( RtAudioError::SYSTEM_ERROR );
6808 // Copy our buffer into the DS buffer
6809 CopyMemory( buffer1, buffer, bufferSize1 );
6810 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6812 // Update our buffer offset and unlock sound buffer
6813 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6814 if ( FAILED( result ) ) {
6815 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6816 errorText_ = errorStream_.str();
6817 MUTEX_UNLOCK( &stream_.mutex );
6818 error( RtAudioError::SYSTEM_ERROR );
6821 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6822 handle->bufferPointer[0] = nextWritePointer;
6825 // Don't bother draining input
6826 if ( handle->drainCounter ) {
6827 handle->drainCounter++;
6831 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6833 // Setup parameters.
6834 if ( stream_.doConvertBuffer[1] ) {
6835 buffer = stream_.deviceBuffer;
6836 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6837 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6840 buffer = stream_.userBuffer[1];
6841 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6842 bufferBytes *= formatBytes( stream_.userFormat );
6845 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6846 long nextReadPointer = handle->bufferPointer[1];
6847 DWORD dsBufferSize = handle->dsBufferSize[1];
6849 // Find out where the write and "safe read" pointers are.
6850 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6851 if ( FAILED( result ) ) {
6852 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6853 errorText_ = errorStream_.str();
6854 MUTEX_UNLOCK( &stream_.mutex );
6855 error( RtAudioError::SYSTEM_ERROR );
6859 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6860 DWORD endRead = nextReadPointer + bufferBytes;
6862 // Handling depends on whether we are INPUT or DUPLEX.
6863 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6864 // then a wait here will drag the write pointers into the forbidden zone.
6866 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6867 // it's in a safe position. This causes dropouts, but it seems to be the only
6868 // practical way to sync up the read and write pointers reliably, given the
6869 // the very complex relationship between phase and increment of the read and write
6872 // In order to minimize audible dropouts in DUPLEX mode, we will
6873 // provide a pre-roll period of 0.5 seconds in which we return
6874 // zeros from the read buffer while the pointers sync up.
6876 if ( stream_.mode == DUPLEX ) {
6877 if ( safeReadPointer < endRead ) {
6878 if ( duplexPrerollBytes <= 0 ) {
6879 // Pre-roll time over. Be more aggressive.
6880 int adjustment = endRead-safeReadPointer;
6882 handle->xrun[1] = true;
6884 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6885 // and perform fine adjustments later.
6886 // - small adjustments: back off by twice as much.
6887 if ( adjustment >= 2*bufferBytes )
6888 nextReadPointer = safeReadPointer-2*bufferBytes;
6890 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6892 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6896 // In pre-roll time. Just do it.
6897 nextReadPointer = safeReadPointer - bufferBytes;
6898 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6900 endRead = nextReadPointer + bufferBytes;
6903 else { // mode == INPUT
6904 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6905 // See comments for playback.
6906 double millis = (endRead - safeReadPointer) * 1000.0;
6907 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6908 if ( millis < 1.0 ) millis = 1.0;
6909 Sleep( (DWORD) millis );
6911 // Wake up and find out where we are now.
6912 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6913 if ( FAILED( result ) ) {
6914 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6915 errorText_ = errorStream_.str();
6916 MUTEX_UNLOCK( &stream_.mutex );
6917 error( RtAudioError::SYSTEM_ERROR );
6921 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6925 // Lock free space in the buffer
6926 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6927 &bufferSize1, &buffer2, &bufferSize2, 0 );
6928 if ( FAILED( result ) ) {
6929 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6930 errorText_ = errorStream_.str();
6931 MUTEX_UNLOCK( &stream_.mutex );
6932 error( RtAudioError::SYSTEM_ERROR );
6936 if ( duplexPrerollBytes <= 0 ) {
6937 // Copy our buffer into the DS buffer
6938 CopyMemory( buffer, buffer1, bufferSize1 );
6939 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6942 memset( buffer, 0, bufferSize1 );
6943 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6944 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6947 // Update our buffer offset and unlock sound buffer
6948 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6949 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6950 if ( FAILED( result ) ) {
6951 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6952 errorText_ = errorStream_.str();
6953 MUTEX_UNLOCK( &stream_.mutex );
6954 error( RtAudioError::SYSTEM_ERROR );
6957 handle->bufferPointer[1] = nextReadPointer;
6959 // No byte swapping necessary in DirectSound implementation.
6961 // If necessary, convert 8-bit data from unsigned to signed.
6962 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6963 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6965 // Do buffer conversion if necessary.
6966 if ( stream_.doConvertBuffer[1] )
6967 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6971 MUTEX_UNLOCK( &stream_.mutex );
6972 RtApi::tickStreamTime();
6975 // Definitions for utility functions and callbacks
6976 // specific to the DirectSound implementation.
6978 static unsigned __stdcall callbackHandler( void *ptr )
6980 CallbackInfo *info = (CallbackInfo *) ptr;
6981 RtApiDs *object = (RtApiDs *) info->object;
6982 bool* isRunning = &info->isRunning;
6984 while ( *isRunning == true ) {
6985 object->callbackEvent();
6992 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6993 LPCTSTR description,
6997 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6998 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
7001 bool validDevice = false;
7002 if ( probeInfo.isInput == true ) {
7004 LPDIRECTSOUNDCAPTURE object;
7006 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7007 if ( hr != DS_OK ) return TRUE;
7009 caps.dwSize = sizeof(caps);
7010 hr = object->GetCaps( &caps );
7011 if ( hr == DS_OK ) {
7012 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7019 LPDIRECTSOUND object;
7020 hr = DirectSoundCreate( lpguid, &object, NULL );
7021 if ( hr != DS_OK ) return TRUE;
7023 caps.dwSize = sizeof(caps);
7024 hr = object->GetCaps( &caps );
7025 if ( hr == DS_OK ) {
7026 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7032 // If good device, then save its name and guid.
7033 std::string name = convertCharPointerToStdString( description );
7034 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7035 if ( lpguid == NULL )
7036 name = "Default Device";
7037 if ( validDevice ) {
7038 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7039 if ( dsDevices[i].name == name ) {
7040 dsDevices[i].found = true;
7041 if ( probeInfo.isInput ) {
7042 dsDevices[i].id[1] = lpguid;
7043 dsDevices[i].validId[1] = true;
7046 dsDevices[i].id[0] = lpguid;
7047 dsDevices[i].validId[0] = true;
7055 device.found = true;
7056 if ( probeInfo.isInput ) {
7057 device.id[1] = lpguid;
7058 device.validId[1] = true;
7061 device.id[0] = lpguid;
7062 device.validId[0] = true;
7064 dsDevices.push_back( device );
7070 static const char* getErrorString( int code )
7074 case DSERR_ALLOCATED:
7075 return "Already allocated";
7077 case DSERR_CONTROLUNAVAIL:
7078 return "Control unavailable";
7080 case DSERR_INVALIDPARAM:
7081 return "Invalid parameter";
7083 case DSERR_INVALIDCALL:
7084 return "Invalid call";
7087 return "Generic error";
7089 case DSERR_PRIOLEVELNEEDED:
7090 return "Priority level needed";
7092 case DSERR_OUTOFMEMORY:
7093 return "Out of memory";
7095 case DSERR_BADFORMAT:
7096 return "The sample rate or the channel format is not supported";
7098 case DSERR_UNSUPPORTED:
7099 return "Not supported";
7101 case DSERR_NODRIVER:
7104 case DSERR_ALREADYINITIALIZED:
7105 return "Already initialized";
7107 case DSERR_NOAGGREGATION:
7108 return "No aggregation";
7110 case DSERR_BUFFERLOST:
7111 return "Buffer lost";
7113 case DSERR_OTHERAPPHASPRIO:
7114 return "Another application already has priority";
7116 case DSERR_UNINITIALIZED:
7117 return "Uninitialized";
7120 return "DirectSound unknown error";
7123 //******************** End of __WINDOWS_DS__ *********************//
7127 #if defined(__LINUX_ALSA__)
7129 #include <alsa/asoundlib.h>
7132 // A structure to hold various information related to the ALSA API
7135 snd_pcm_t *handles[2];
7138 pthread_cond_t runnable_cv;
7142 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7145 static void *alsaCallbackHandler( void * ptr );
7147 RtApiAlsa :: RtApiAlsa()
7149 // Nothing to do here.
7152 RtApiAlsa :: ~RtApiAlsa()
7154 if ( stream_.state != STREAM_CLOSED ) closeStream();
7157 unsigned int RtApiAlsa :: getDeviceCount( void )
7159 unsigned nDevices = 0;
7160 int result, subdevice, card;
7164 // Count cards and devices
7166 snd_card_next( &card );
7167 while ( card >= 0 ) {
7168 sprintf( name, "hw:%d", card );
7169 result = snd_ctl_open( &handle, name, 0 );
7171 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7172 errorText_ = errorStream_.str();
7173 error( RtAudioError::WARNING );
7178 result = snd_ctl_pcm_next_device( handle, &subdevice );
7180 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7181 errorText_ = errorStream_.str();
7182 error( RtAudioError::WARNING );
7185 if ( subdevice < 0 )
7190 snd_ctl_close( handle );
7191 snd_card_next( &card );
7194 result = snd_ctl_open( &handle, "default", 0 );
7197 snd_ctl_close( handle );
7203 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7205 RtAudio::DeviceInfo info;
7206 info.probed = false;
7208 unsigned nDevices = 0;
7209 int result, subdevice, card;
7213 // Count cards and devices
7216 snd_card_next( &card );
7217 while ( card >= 0 ) {
7218 sprintf( name, "hw:%d", card );
7219 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7221 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7222 errorText_ = errorStream_.str();
7223 error( RtAudioError::WARNING );
7228 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7230 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7231 errorText_ = errorStream_.str();
7232 error( RtAudioError::WARNING );
7235 if ( subdevice < 0 ) break;
7236 if ( nDevices == device ) {
7237 sprintf( name, "hw:%d,%d", card, subdevice );
7243 snd_ctl_close( chandle );
7244 snd_card_next( &card );
7247 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7248 if ( result == 0 ) {
7249 if ( nDevices == device ) {
7250 strcpy( name, "default" );
7256 if ( nDevices == 0 ) {
7257 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7258 error( RtAudioError::INVALID_USE );
7262 if ( device >= nDevices ) {
7263 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7264 error( RtAudioError::INVALID_USE );
7270 // If a stream is already open, we cannot probe the stream devices.
7271 // Thus, use the saved results.
7272 if ( stream_.state != STREAM_CLOSED &&
7273 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7274 snd_ctl_close( chandle );
7275 if ( device >= devices_.size() ) {
7276 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7277 error( RtAudioError::WARNING );
7280 return devices_[ device ];
7283 int openMode = SND_PCM_ASYNC;
7284 snd_pcm_stream_t stream;
7285 snd_pcm_info_t *pcminfo;
7286 snd_pcm_info_alloca( &pcminfo );
7288 snd_pcm_hw_params_t *params;
7289 snd_pcm_hw_params_alloca( ¶ms );
7291 // First try for playback unless default device (which has subdev -1)
7292 stream = SND_PCM_STREAM_PLAYBACK;
7293 snd_pcm_info_set_stream( pcminfo, stream );
7294 if ( subdevice != -1 ) {
7295 snd_pcm_info_set_device( pcminfo, subdevice );
7296 snd_pcm_info_set_subdevice( pcminfo, 0 );
7298 result = snd_ctl_pcm_info( chandle, pcminfo );
7300 // Device probably doesn't support playback.
7305 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7307 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7308 errorText_ = errorStream_.str();
7309 error( RtAudioError::WARNING );
7313 // The device is open ... fill the parameter structure.
7314 result = snd_pcm_hw_params_any( phandle, params );
7316 snd_pcm_close( phandle );
7317 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7318 errorText_ = errorStream_.str();
7319 error( RtAudioError::WARNING );
7323 // Get output channel information.
7325 result = snd_pcm_hw_params_get_channels_max( params, &value );
7327 snd_pcm_close( phandle );
7328 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7329 errorText_ = errorStream_.str();
7330 error( RtAudioError::WARNING );
7333 info.outputChannels = value;
7334 snd_pcm_close( phandle );
7337 stream = SND_PCM_STREAM_CAPTURE;
7338 snd_pcm_info_set_stream( pcminfo, stream );
7340 // Now try for capture unless default device (with subdev = -1)
7341 if ( subdevice != -1 ) {
7342 result = snd_ctl_pcm_info( chandle, pcminfo );
7343 snd_ctl_close( chandle );
7345 // Device probably doesn't support capture.
7346 if ( info.outputChannels == 0 ) return info;
7347 goto probeParameters;
7351 snd_ctl_close( chandle );
7353 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7355 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7356 errorText_ = errorStream_.str();
7357 error( RtAudioError::WARNING );
7358 if ( info.outputChannels == 0 ) return info;
7359 goto probeParameters;
7362 // The device is open ... fill the parameter structure.
7363 result = snd_pcm_hw_params_any( phandle, params );
7365 snd_pcm_close( phandle );
7366 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7367 errorText_ = errorStream_.str();
7368 error( RtAudioError::WARNING );
7369 if ( info.outputChannels == 0 ) return info;
7370 goto probeParameters;
7373 result = snd_pcm_hw_params_get_channels_max( params, &value );
7375 snd_pcm_close( phandle );
7376 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7377 errorText_ = errorStream_.str();
7378 error( RtAudioError::WARNING );
7379 if ( info.outputChannels == 0 ) return info;
7380 goto probeParameters;
7382 info.inputChannels = value;
7383 snd_pcm_close( phandle );
7385 // If device opens for both playback and capture, we determine the channels.
7386 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7387 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7389 // ALSA doesn't provide default devices so we'll use the first available one.
7390 if ( device == 0 && info.outputChannels > 0 )
7391 info.isDefaultOutput = true;
7392 if ( device == 0 && info.inputChannels > 0 )
7393 info.isDefaultInput = true;
7396 // At this point, we just need to figure out the supported data
7397 // formats and sample rates. We'll proceed by opening the device in
7398 // the direction with the maximum number of channels, or playback if
7399 // they are equal. This might limit our sample rate options, but so
7402 if ( info.outputChannels >= info.inputChannels )
7403 stream = SND_PCM_STREAM_PLAYBACK;
7405 stream = SND_PCM_STREAM_CAPTURE;
7406 snd_pcm_info_set_stream( pcminfo, stream );
7408 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7410 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7411 errorText_ = errorStream_.str();
7412 error( RtAudioError::WARNING );
7416 // The device is open ... fill the parameter structure.
7417 result = snd_pcm_hw_params_any( phandle, params );
7419 snd_pcm_close( phandle );
7420 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7421 errorText_ = errorStream_.str();
7422 error( RtAudioError::WARNING );
7426 // Test our discrete set of sample rate values.
7427 info.sampleRates.clear();
7428 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7429 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7430 info.sampleRates.push_back( SAMPLE_RATES[i] );
7432 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7433 info.preferredSampleRate = SAMPLE_RATES[i];
7436 if ( info.sampleRates.size() == 0 ) {
7437 snd_pcm_close( phandle );
7438 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7439 errorText_ = errorStream_.str();
7440 error( RtAudioError::WARNING );
7444 // Probe the supported data formats ... we don't care about endian-ness just yet
7445 snd_pcm_format_t format;
7446 info.nativeFormats = 0;
7447 format = SND_PCM_FORMAT_S8;
7448 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7449 info.nativeFormats |= RTAUDIO_SINT8;
7450 format = SND_PCM_FORMAT_S16;
7451 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7452 info.nativeFormats |= RTAUDIO_SINT16;
7453 format = SND_PCM_FORMAT_S24;
7454 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7455 info.nativeFormats |= RTAUDIO_SINT24;
7456 format = SND_PCM_FORMAT_S32;
7457 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7458 info.nativeFormats |= RTAUDIO_SINT32;
7459 format = SND_PCM_FORMAT_FLOAT;
7460 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7461 info.nativeFormats |= RTAUDIO_FLOAT32;
7462 format = SND_PCM_FORMAT_FLOAT64;
7463 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7464 info.nativeFormats |= RTAUDIO_FLOAT64;
7466 // Check that we have at least one supported format
7467 if ( info.nativeFormats == 0 ) {
7468 snd_pcm_close( phandle );
7469 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7470 errorText_ = errorStream_.str();
7471 error( RtAudioError::WARNING );
7475 // Get the device name
7477 result = snd_card_get_name( card, &cardname );
7478 if ( result >= 0 ) {
7479 sprintf( name, "hw:%s,%d", cardname, subdevice );
7484 // That's all ... close the device and return
7485 snd_pcm_close( phandle );
7490 void RtApiAlsa :: saveDeviceInfo( void )
7494 unsigned int nDevices = getDeviceCount();
7495 devices_.resize( nDevices );
7496 for ( unsigned int i=0; i<nDevices; i++ )
7497 devices_[i] = getDeviceInfo( i );
7500 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7501 unsigned int firstChannel, unsigned int sampleRate,
7502 RtAudioFormat format, unsigned int *bufferSize,
7503 RtAudio::StreamOptions *options )
7506 #if defined(__RTAUDIO_DEBUG__)
7508 snd_output_stdio_attach(&out, stderr, 0);
7511 // I'm not using the "plug" interface ... too much inconsistent behavior.
7513 unsigned nDevices = 0;
7514 int result, subdevice, card;
7518 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7519 snprintf(name, sizeof(name), "%s", "default");
7521 // Count cards and devices
7523 snd_card_next( &card );
7524 while ( card >= 0 ) {
7525 sprintf( name, "hw:%d", card );
7526 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7528 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7529 errorText_ = errorStream_.str();
7534 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7535 if ( result < 0 ) break;
7536 if ( subdevice < 0 ) break;
7537 if ( nDevices == device ) {
7538 sprintf( name, "hw:%d,%d", card, subdevice );
7539 snd_ctl_close( chandle );
7544 snd_ctl_close( chandle );
7545 snd_card_next( &card );
7548 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7549 if ( result == 0 ) {
7550 if ( nDevices == device ) {
7551 strcpy( name, "default" );
7552 snd_ctl_close( chandle );
7557 snd_ctl_close( chandle );
7559 if ( nDevices == 0 ) {
7560 // This should not happen because a check is made before this function is called.
7561 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7565 if ( device >= nDevices ) {
7566 // This should not happen because a check is made before this function is called.
7567 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7574 // The getDeviceInfo() function will not work for a device that is
7575 // already open. Thus, we'll probe the system before opening a
7576 // stream and save the results for use by getDeviceInfo().
7577 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7578 this->saveDeviceInfo();
7580 snd_pcm_stream_t stream;
7581 if ( mode == OUTPUT )
7582 stream = SND_PCM_STREAM_PLAYBACK;
7584 stream = SND_PCM_STREAM_CAPTURE;
7587 int openMode = SND_PCM_ASYNC;
7588 result = snd_pcm_open( &phandle, name, stream, openMode );
7590 if ( mode == OUTPUT )
7591 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7593 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7594 errorText_ = errorStream_.str();
7598 // Fill the parameter structure.
7599 snd_pcm_hw_params_t *hw_params;
7600 snd_pcm_hw_params_alloca( &hw_params );
7601 result = snd_pcm_hw_params_any( phandle, hw_params );
7603 snd_pcm_close( phandle );
7604 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7605 errorText_ = errorStream_.str();
7609 #if defined(__RTAUDIO_DEBUG__)
7610 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7611 snd_pcm_hw_params_dump( hw_params, out );
7614 // Set access ... check user preference.
7615 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7616 stream_.userInterleaved = false;
7617 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7619 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7620 stream_.deviceInterleaved[mode] = true;
7623 stream_.deviceInterleaved[mode] = false;
7626 stream_.userInterleaved = true;
7627 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7629 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7630 stream_.deviceInterleaved[mode] = false;
7633 stream_.deviceInterleaved[mode] = true;
7637 snd_pcm_close( phandle );
7638 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7639 errorText_ = errorStream_.str();
7643 // Determine how to set the device format.
7644 stream_.userFormat = format;
7645 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7647 if ( format == RTAUDIO_SINT8 )
7648 deviceFormat = SND_PCM_FORMAT_S8;
7649 else if ( format == RTAUDIO_SINT16 )
7650 deviceFormat = SND_PCM_FORMAT_S16;
7651 else if ( format == RTAUDIO_SINT24 )
7652 deviceFormat = SND_PCM_FORMAT_S24;
7653 else if ( format == RTAUDIO_SINT32 )
7654 deviceFormat = SND_PCM_FORMAT_S32;
7655 else if ( format == RTAUDIO_FLOAT32 )
7656 deviceFormat = SND_PCM_FORMAT_FLOAT;
7657 else if ( format == RTAUDIO_FLOAT64 )
7658 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7660 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7661 stream_.deviceFormat[mode] = format;
7665 // The user requested format is not natively supported by the device.
7666 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7667 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7668 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7672 deviceFormat = SND_PCM_FORMAT_FLOAT;
7673 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7674 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7678 deviceFormat = SND_PCM_FORMAT_S32;
7679 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7680 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7684 deviceFormat = SND_PCM_FORMAT_S24;
7685 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7686 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7690 deviceFormat = SND_PCM_FORMAT_S16;
7691 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7692 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7696 deviceFormat = SND_PCM_FORMAT_S8;
7697 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7698 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7702 // If we get here, no supported format was found.
7703 snd_pcm_close( phandle );
7704 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7705 errorText_ = errorStream_.str();
7709 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7711 snd_pcm_close( phandle );
7712 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7713 errorText_ = errorStream_.str();
7717 // Determine whether byte-swaping is necessary.
7718 stream_.doByteSwap[mode] = false;
7719 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7720 result = snd_pcm_format_cpu_endian( deviceFormat );
7722 stream_.doByteSwap[mode] = true;
7723 else if (result < 0) {
7724 snd_pcm_close( phandle );
7725 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7726 errorText_ = errorStream_.str();
7731 // Set the sample rate.
7732 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7734 snd_pcm_close( phandle );
7735 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7736 errorText_ = errorStream_.str();
7740 // Determine the number of channels for this device. We support a possible
7741 // minimum device channel number > than the value requested by the user.
7742 stream_.nUserChannels[mode] = channels;
7744 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7745 unsigned int deviceChannels = value;
7746 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7747 snd_pcm_close( phandle );
7748 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7749 errorText_ = errorStream_.str();
7753 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7755 snd_pcm_close( phandle );
7756 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7757 errorText_ = errorStream_.str();
7760 deviceChannels = value;
7761 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7762 stream_.nDeviceChannels[mode] = deviceChannels;
7764 // Set the device channels.
7765 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7767 snd_pcm_close( phandle );
7768 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7769 errorText_ = errorStream_.str();
7773 // Set the buffer (or period) size.
7775 snd_pcm_uframes_t periodSize = *bufferSize;
7776 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7778 snd_pcm_close( phandle );
7779 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7780 errorText_ = errorStream_.str();
7783 *bufferSize = periodSize;
7785 // Set the buffer number, which in ALSA is referred to as the "period".
7786 unsigned int periods = 0;
7787 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7788 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7789 if ( periods < 2 ) periods = 4; // a fairly safe default value
7790 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7792 snd_pcm_close( phandle );
7793 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7794 errorText_ = errorStream_.str();
7798 // If attempting to setup a duplex stream, the bufferSize parameter
7799 // MUST be the same in both directions!
7800 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7801 snd_pcm_close( phandle );
7802 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7803 errorText_ = errorStream_.str();
7807 stream_.bufferSize = *bufferSize;
7809 // Install the hardware configuration
7810 result = snd_pcm_hw_params( phandle, hw_params );
7812 snd_pcm_close( phandle );
7813 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7814 errorText_ = errorStream_.str();
7818 #if defined(__RTAUDIO_DEBUG__)
7819 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7820 snd_pcm_hw_params_dump( hw_params, out );
7823 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7824 snd_pcm_sw_params_t *sw_params = NULL;
7825 snd_pcm_sw_params_alloca( &sw_params );
7826 snd_pcm_sw_params_current( phandle, sw_params );
7827 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7828 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7829 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7831 // The following two settings were suggested by Theo Veenker
7832 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7833 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7835 // here are two options for a fix
7836 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7837 snd_pcm_uframes_t val;
7838 snd_pcm_sw_params_get_boundary( sw_params, &val );
7839 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7841 result = snd_pcm_sw_params( phandle, sw_params );
7843 snd_pcm_close( phandle );
7844 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7845 errorText_ = errorStream_.str();
7849 #if defined(__RTAUDIO_DEBUG__)
7850 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7851 snd_pcm_sw_params_dump( sw_params, out );
7854 // Set flags for buffer conversion
7855 stream_.doConvertBuffer[mode] = false;
7856 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7857 stream_.doConvertBuffer[mode] = true;
7858 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7859 stream_.doConvertBuffer[mode] = true;
7860 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7861 stream_.nUserChannels[mode] > 1 )
7862 stream_.doConvertBuffer[mode] = true;
7864 // Allocate the ApiHandle if necessary and then save.
7865 AlsaHandle *apiInfo = 0;
7866 if ( stream_.apiHandle == 0 ) {
7868 apiInfo = (AlsaHandle *) new AlsaHandle;
7870 catch ( std::bad_alloc& ) {
7871 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7875 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7876 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7880 stream_.apiHandle = (void *) apiInfo;
7881 apiInfo->handles[0] = 0;
7882 apiInfo->handles[1] = 0;
7885 apiInfo = (AlsaHandle *) stream_.apiHandle;
7887 apiInfo->handles[mode] = phandle;
7890 // Allocate necessary internal buffers.
7891 unsigned long bufferBytes;
7892 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7893 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7894 if ( stream_.userBuffer[mode] == NULL ) {
7895 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7899 if ( stream_.doConvertBuffer[mode] ) {
7901 bool makeBuffer = true;
7902 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7903 if ( mode == INPUT ) {
7904 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7905 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7906 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7911 bufferBytes *= *bufferSize;
7912 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7913 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7914 if ( stream_.deviceBuffer == NULL ) {
7915 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7921 stream_.sampleRate = sampleRate;
7922 stream_.nBuffers = periods;
7923 stream_.device[mode] = device;
7924 stream_.state = STREAM_STOPPED;
7926 // Setup the buffer conversion information structure.
7927 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7929 // Setup thread if necessary.
7930 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7931 // We had already set up an output stream.
7932 stream_.mode = DUPLEX;
7933 // Link the streams if possible.
7934 apiInfo->synchronized = false;
7935 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7936 apiInfo->synchronized = true;
7938 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7939 error( RtAudioError::WARNING );
7943 stream_.mode = mode;
7945 // Setup callback thread.
7946 stream_.callbackInfo.object = (void *) this;
7948 // Set the thread attributes for joinable and realtime scheduling
7949 // priority (optional). The higher priority will only take affect
7950 // if the program is run as root or suid. Note, under Linux
7951 // processes with CAP_SYS_NICE privilege, a user can change
7952 // scheduling policy and priority (thus need not be root). See
7953 // POSIX "capabilities".
7954 pthread_attr_t attr;
7955 pthread_attr_init( &attr );
7956 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7957 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7958 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7959 stream_.callbackInfo.doRealtime = true;
7960 struct sched_param param;
7961 int priority = options->priority;
7962 int min = sched_get_priority_min( SCHED_RR );
7963 int max = sched_get_priority_max( SCHED_RR );
7964 if ( priority < min ) priority = min;
7965 else if ( priority > max ) priority = max;
7966 param.sched_priority = priority;
7968 // Set the policy BEFORE the priority. Otherwise it fails.
7969 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7970 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7971 // This is definitely required. Otherwise it fails.
7972 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7973 pthread_attr_setschedparam(&attr, ¶m);
7976 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7978 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7981 stream_.callbackInfo.isRunning = true;
7982 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7983 pthread_attr_destroy( &attr );
7985 // Failed. Try instead with default attributes.
7986 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7988 stream_.callbackInfo.isRunning = false;
7989 errorText_ = "RtApiAlsa::error creating callback thread!";
7999 pthread_cond_destroy( &apiInfo->runnable_cv );
8000 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8001 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8003 stream_.apiHandle = 0;
8006 if ( phandle) snd_pcm_close( phandle );
8008 for ( int i=0; i<2; i++ ) {
8009 if ( stream_.userBuffer[i] ) {
8010 free( stream_.userBuffer[i] );
8011 stream_.userBuffer[i] = 0;
8015 if ( stream_.deviceBuffer ) {
8016 free( stream_.deviceBuffer );
8017 stream_.deviceBuffer = 0;
8020 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: stop the callback thread, drop any in-flight
// PCM data, close the device handles, destroy the AlsaHandle, and free
// the user/device conversion buffers.
// NOTE(review): several physical lines (closing braces / early returns)
// appear to be elided from this view of the file.
8024 void RtApiAlsa :: closeStream()
8026   if ( stream_.state == STREAM_CLOSED ) {
8027     errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8028     error( RtAudioError::WARNING );
// Tell the callback loop to exit, then wake it if it is parked waiting
// on the runnable condition variable (stream stopped).
8032   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8033   stream_.callbackInfo.isRunning = false;
8034   MUTEX_LOCK( &stream_.mutex );
8035   if ( stream_.state == STREAM_STOPPED ) {
8036     apiInfo->runnable = true;
8037     pthread_cond_signal( &apiInfo->runnable_cv );
8039   MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing anything down.
8040   pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, abort playback/capture immediately (snd_pcm_drop
// discards pending frames rather than draining them).
8042   if ( stream_.state == STREAM_RUNNING ) {
8043     stream_.state = STREAM_STOPPED;
8044     if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8045       snd_pcm_drop( apiInfo->handles[0] );
8046     if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8047       snd_pcm_drop( apiInfo->handles[1] );
// Release the condition variable and both PCM handles (index 0 =
// playback, index 1 = capture), then drop the api handle pointer.
8051     pthread_cond_destroy( &apiInfo->runnable_cv );
8052     if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8053     if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8055     stream_.apiHandle = 0;
// Free per-direction user buffers and the shared device buffer.
8058   for ( int i=0; i<2; i++ ) {
8059     if ( stream_.userBuffer[i] ) {
8060       free( stream_.userBuffer[i] );
8061       stream_.userBuffer[i] = 0;
8065   if ( stream_.deviceBuffer ) {
8066     free( stream_.deviceBuffer );
8067     stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the fully-closed state.
8070   stream_.mode = UNINITIALIZED;
8071   stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM device(s) if needed and
// wake the callback thread via the runnable condition variable.
8074 void RtApiAlsa :: startStream()
8076   // This method calls snd_pcm_prepare if the device isn't already in that state.
8079   if ( stream_.state == STREAM_RUNNING ) {
8080     errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8081     error( RtAudioError::WARNING );
8085   MUTEX_LOCK( &stream_.mutex );
8088   snd_pcm_state_t state;
8089   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8090   snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback handle (handles[0]) unless it already is.
8091   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8092     state = snd_pcm_state( handle[0] );
8093     if ( state != SND_PCM_STATE_PREPARED ) {
8094       result = snd_pcm_prepare( handle[0] );
// NOTE(review): the error-check branch guarding these two lines is
// elided from this view — presumably `if ( result < 0 ) { ... }`.
8096         errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8097         errorText_ = errorStream_.str();
// Capture side (handles[1]) is only prepared separately when the two
// directions are not linked (synchronized) — a linked pair starts together.
8103   if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8104     result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8105     state = snd_pcm_state( handle[1] );
8106     if ( state != SND_PCM_STATE_PREPARED ) {
8107       result = snd_pcm_prepare( handle[1] );
8109         errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8110         errorText_ = errorStream_.str();
8116   stream_.state = STREAM_RUNNING;
// Release the callback thread from its stopped-state wait.
8119   apiInfo->runnable = true;
8120   pthread_cond_signal( &apiInfo->runnable_cv );
8121   MUTEX_UNLOCK( &stream_.mutex );
8123   if ( result >= 0 ) return;
8124   error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully: drain pending playback frames
// (or drop them when the pair is linked), then park the callback thread.
8127 void RtApiAlsa :: stopStream()
8130   if ( stream_.state == STREAM_STOPPED ) {
8131     errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8132     error( RtAudioError::WARNING );
8136   stream_.state = STREAM_STOPPED;
8137   MUTEX_LOCK( &stream_.mutex );
8140   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8141   snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8142   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) streams are dropped so both directions stop at
// once; an unlinked output is drained so queued audio finishes playing.
8143     if ( apiInfo->synchronized )
8144       result = snd_pcm_drop( handle[0] );
8146       result = snd_pcm_drain( handle[0] );
8148       errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8149       errorText_ = errorStream_.str();
// An unlinked capture handle is simply dropped — no point draining input.
8154   if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8155     result = snd_pcm_drop( handle[1] );
8157       errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8158       errorText_ = errorStream_.str();
8164   apiInfo->runnable = false; // fixes high CPU usage when stopped
8165   MUTEX_UNLOCK( &stream_.mutex );
8167   if ( result >= 0 ) return;
8168   error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream immediately: snd_pcm_drop discards any
// pending frames in both directions (unlike stopStream, which drains
// the output), then parks the callback thread.
8171 void RtApiAlsa :: abortStream()
8174   if ( stream_.state == STREAM_STOPPED ) {
8175     errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8176     error( RtAudioError::WARNING );
8180   stream_.state = STREAM_STOPPED;
8181   MUTEX_LOCK( &stream_.mutex );
8184   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8185   snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8186   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8187     result = snd_pcm_drop( handle[0] );
8189       errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8190       errorText_ = errorStream_.str();
// Capture side only needs a separate drop when the pair is not linked.
8195   if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8196     result = snd_pcm_drop( handle[1] );
8198       errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8199       errorText_ = errorStream_.str();
8205   apiInfo->runnable = false; // fixes high CPU usage when stopped
8206   MUTEX_UNLOCK( &stream_.mutex );
8208   if ( result >= 0 ) return;
8209   error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then read captured frames and/or write playback frames,
// handling format conversion, byte swapping, and xrun (EPIPE) recovery.
8212 void RtApiAlsa :: callbackEvent()
8214   AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// When stopped, block on the runnable condition variable until
// startStream() (or closeStream()) signals it.
8215   if ( stream_.state == STREAM_STOPPED ) {
8216     MUTEX_LOCK( &stream_.mutex );
8217     while ( !apiInfo->runnable )
8218       pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8220     if ( stream_.state != STREAM_RUNNING ) {
8221       MUTEX_UNLOCK( &stream_.mutex );
8224     MUTEX_UNLOCK( &stream_.mutex );
8227   if ( stream_.state == STREAM_CLOSED ) {
8228     errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8229     error( RtAudioError::WARNING );
// Report any xruns recorded by the device loops to the user callback
// via the status flags, clearing the latches afterwards.
8233   int doStopStream = 0;
8234   RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8235   double streamTime = getStreamTime();
8236   RtAudioStreamStatus status = 0;
8237   if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8238     status |= RTAUDIO_OUTPUT_UNDERFLOW;
8239     apiInfo->xrun[0] = false;
8241   if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8242     status |= RTAUDIO_INPUT_OVERFLOW;
8243     apiInfo->xrun[1] = false;
8245   doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8246                            stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return value 2 requests an immediate abort (handled in the
// elided lines, presumably via abortStream); 1 requests a drain/stop.
8248   if ( doStopStream == 2 ) {
8253   MUTEX_LOCK( &stream_.mutex );
8255   // The state might change while waiting on a mutex.
8256   if ( stream_.state == STREAM_STOPPED ) goto unlock;
8262   snd_pcm_sframes_t frames;
8263   RtAudioFormat format;
8264   handle = (snd_pcm_t **) apiInfo->handles;
// ---------------- Capture side ----------------
8266   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8268     // Setup parameters.
// Read straight into the user buffer unless a conversion pass
// (format/channel/interleave) is required, in which case stage into
// the device buffer first.
8269     if ( stream_.doConvertBuffer[1] ) {
8270       buffer = stream_.deviceBuffer;
8271       channels = stream_.nDeviceChannels[1];
8272       format = stream_.deviceFormat[1];
8275       buffer = stream_.userBuffer[1];
8276       channels = stream_.nUserChannels[1];
8277       format = stream_.userFormat;
8280     // Read samples from device in interleaved/non-interleaved format.
8281     if ( stream_.deviceInterleaved[1] )
8282       result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved read: build per-channel pointer array into the
// contiguous buffer (channel-major layout, one bufferSize plane each).
8284       void *bufs[channels];
8285       size_t offset = stream_.bufferSize * formatBytes( format );
8286       for ( int i=0; i<channels; i++ )
8287         bufs[i] = (void *) (buffer + (i * offset));
8288       result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8291     if ( result < (int) stream_.bufferSize ) {
8292       // Either an error or overrun occured.
// -EPIPE signals an xrun: latch the overflow flag and re-prepare the
// device so capture can continue.
8293       if ( result == -EPIPE ) {
8294         snd_pcm_state_t state = snd_pcm_state( handle[1] );
8295         if ( state == SND_PCM_STATE_XRUN ) {
8296           apiInfo->xrun[1] = true;
8297           result = snd_pcm_prepare( handle[1] );
8299             errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8300             errorText_ = errorStream_.str();
8304           errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8305           errorText_ = errorStream_.str();
8309         errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8310         errorText_ = errorStream_.str();
8312       error( RtAudioError::WARNING );
8316     // Do byte swapping if necessary.
8317     if ( stream_.doByteSwap[1] )
8318       byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8320     // Do buffer conversion if necessary.
8321     if ( stream_.doConvertBuffer[1] )
8322       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8324     // Check stream latency
8325     result = snd_pcm_delay( handle[1], &frames );
8326     if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---------------- Playback side ----------------
8331   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8333     // Setup parameters and do buffer conversion if necessary.
8334     if ( stream_.doConvertBuffer[0] ) {
8335       buffer = stream_.deviceBuffer;
8336       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8337       channels = stream_.nDeviceChannels[0];
8338       format = stream_.deviceFormat[0];
8341       buffer = stream_.userBuffer[0];
8342       channels = stream_.nUserChannels[0];
8343       format = stream_.userFormat;
8346     // Do byte swapping if necessary.
8347     if ( stream_.doByteSwap[0] )
8348       byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8350     // Write samples to device in interleaved/non-interleaved format.
8351     if ( stream_.deviceInterleaved[0] )
8352       result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8354       void *bufs[channels];
8355       size_t offset = stream_.bufferSize * formatBytes( format );
8356       for ( int i=0; i<channels; i++ )
8357         bufs[i] = (void *) (buffer + (i * offset));
8358       result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8361     if ( result < (int) stream_.bufferSize ) {
8362       // Either an error or underrun occured.
// Underrun recovery mirrors the capture path: latch xrun[0] and
// re-prepare the playback handle.
8363       if ( result == -EPIPE ) {
8364         snd_pcm_state_t state = snd_pcm_state( handle[0] );
8365         if ( state == SND_PCM_STATE_XRUN ) {
8366           apiInfo->xrun[0] = true;
8367           result = snd_pcm_prepare( handle[0] );
8369             errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8370             errorText_ = errorStream_.str();
8373             errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8376           errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8377           errorText_ = errorStream_.str();
8381         errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8382         errorText_ = errorStream_.str();
8384       error( RtAudioError::WARNING );
8388     // Check stream latency
8389     result = snd_pcm_delay( handle[0], &frames );
8390     if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8394   MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a deferred stop request from the callback.
8396   RtApi::tickStreamTime();
8397   if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback loop.  Repeatedly invokes
// RtApiAlsa::callbackEvent() until closeStream() clears isRunning.
8400 static void *alsaCallbackHandler( void *ptr )
8402   CallbackInfo *info = (CallbackInfo *) ptr;
8403   RtApiAlsa *object = (RtApiAlsa *) info->object;
8404   bool *isRunning = &info->isRunning;
8406 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether the requested realtime (SCHED_RR)
// scheduling actually took effect for this thread.
8407   if ( info->doRealtime ) {
8408     std::cerr << "RtAudio alsa: " <<
8409              (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8410              "running realtime scheduling" << std::endl;
// pthread_testcancel provides a cancellation point on each iteration.
8414   while ( *isRunning == true ) {
8415     pthread_testcancel();
8416     object->callbackEvent();
8419   pthread_exit( NULL );
8422 //******************** End of __LINUX_ALSA__ *********************//
8425 #if defined(__LINUX_PULSE__)
8427 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8428 // and Tristan Matthews.
8430 #include <pulse/error.h>
8431 #include <pulse/simple.h>
// Sample rates advertised by the PulseAudio backend; zero terminates
// the list (see the iteration in getDeviceInfo / probeDeviceOpen).
8434 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8435                                                       44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent; used
// as the element type of the supported_sampleformats table below.
8437 struct rtaudio_pa_format_mapping_t {
8438   RtAudioFormat rtaudio_format;
8439   pa_sample_format_t pa_format;
// RtAudio <-> PulseAudio format translation table; the {0, PA_SAMPLE_INVALID}
// entry is the sentinel that terminates iteration.
8442 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8443   {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8444   {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8445   {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8446   {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the playback/record
// pa_simple connections (s_play / s_rec), the callback thread, and the
// runnable flag + condition variable used to park the thread while the
// stream is stopped.  NOTE(review): some member declarations are elided
// from this view of the file.
8448 struct PulseAudioHandle {
8452   pthread_cond_t runnable_cv;
8454   PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: make sure any stream still open is closed (presumably via
// closeStream() on the elided following line) before the object dies.
8457 RtApiPulse::~RtApiPulse()
8459   if ( stream_.state != STREAM_CLOSED )
// Report the device count for the PulseAudio backend (body elided in
// this view; getDeviceInfo ignores its device index, suggesting a
// single virtual device — TODO confirm).
8463 unsigned int RtApiPulse::getDeviceCount( void )
// Describe the single virtual "PulseAudio" device: stereo in/out, the
// fixed SUPPORTED_SAMPLERATES list, and the three natively supported
// sample formats.  The device index is ignored.
8468 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8470   RtAudio::DeviceInfo info;
8472   info.name = "PulseAudio";
8473   info.outputChannels = 2;
8474   info.inputChannels = 2;
8475   info.duplexChannels = 2;
8476   info.isDefaultOutput = true;
8477   info.isDefaultInput = true;
// Copy the zero-terminated static rate list into the info structure.
8479   for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8480     info.sampleRates.push_back( *sr );
8482   info.preferredSampleRate = 48000;
8483   info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback loop.  Mirrors
// alsaCallbackHandler: loops on RtApiPulse::callbackEvent() until
// closeStream() clears isRunning.
8488 static void *pulseaudio_callback( void * user )
8490   CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8491   RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8492   volatile bool *isRunning = &cbi->isRunning;
8494 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether SCHED_RR scheduling actually applied.
8495   if (cbi->doRealtime) {
8496     std::cerr << "RtAudio pulse: " <<
8497              (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8498              "running realtime scheduling" << std::endl;
// pthread_testcancel provides a cancellation point on each iteration.
8502   while ( *isRunning ) {
8503     pthread_testcancel();
8504     context->callbackEvent();
8507   pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread, flush
// and free the pa_simple connections, destroy the handle, and release
// the user buffers.
8510 void RtApiPulse::closeStream( void )
8512   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8514   stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is parked in the stopped-state wait so
// it can observe isRunning == false and exit.
8516   MUTEX_LOCK( &stream_.mutex );
8517   if ( stream_.state == STREAM_STOPPED ) {
8518     pah->runnable = true;
8519     pthread_cond_signal( &pah->runnable_cv );
8521   MUTEX_UNLOCK( &stream_.mutex );
8523   pthread_join( pah->thread, 0 );
// Flush any queued playback audio before freeing the connection.
8524   if ( pah->s_play ) {
8525     pa_simple_flush( pah->s_play, NULL );
8526     pa_simple_free( pah->s_play );
8529     pa_simple_free( pah->s_rec );
8531   pthread_cond_destroy( &pah->runnable_cv );
8533   stream_.apiHandle = 0;
// Free the per-direction user buffers (0 = output, 1 = input).
8536   if ( stream_.userBuffer[0] ) {
8537     free( stream_.userBuffer[0] );
8538     stream_.userBuffer[0] = 0;
8540   if ( stream_.userBuffer[1] ) {
8541     free( stream_.userBuffer[1] );
8542     stream_.userBuffer[1] = 0;
8545   stream_.state = STREAM_CLOSED;
8546   stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped,
// invoke the user callback, then write playback data and/or read capture
// data through the pa_simple API, converting formats where needed.
8549 void RtApiPulse::callbackEvent( void )
8551   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// When stopped, block on the runnable condition variable until
// startStream() (or closeStream()) signals it.
8553   if ( stream_.state == STREAM_STOPPED ) {
8554     MUTEX_LOCK( &stream_.mutex );
8555     while ( !pah->runnable )
8556       pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8558     if ( stream_.state != STREAM_RUNNING ) {
8559       MUTEX_UNLOCK( &stream_.mutex );
8562     MUTEX_UNLOCK( &stream_.mutex );
8565   if ( stream_.state == STREAM_CLOSED ) {
8566     errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8567       "this shouldn't happen!";
8568     error( RtAudioError::WARNING );
8572   RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8573   double streamTime = getStreamTime();
8574   RtAudioStreamStatus status = 0;
8575   int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8576                                stream_.bufferSize, streamTime, status,
8577                                stream_.callbackInfo.userData );
// Return value 2 requests an immediate abort (handled in elided lines).
8579   if ( doStopStream == 2 ) {
8584   MUTEX_LOCK( &stream_.mutex );
// Choose the staging buffer per direction: the shared device buffer when
// a conversion pass is required, else the user buffer directly.
8585   void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8586   void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8588   if ( stream_.state != STREAM_RUNNING )
// ---------------- Playback side ----------------
8593   if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8594     if ( stream_.doConvertBuffer[OUTPUT] ) {
8595       convertBuffer( stream_.deviceBuffer,
8596                      stream_.userBuffer[OUTPUT],
8597                      stream_.convertInfo[OUTPUT] );
8598       bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8599               formatBytes( stream_.deviceFormat[OUTPUT] );
8601       bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8602               formatBytes( stream_.userFormat );
// pa_simple_write blocks until the server has accepted the whole buffer.
8604     if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8605       errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8606         pa_strerror( pa_error ) << ".";
8607       errorText_ = errorStream_.str();
8608       error( RtAudioError::WARNING );
// ---------------- Capture side ----------------
8612   if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8613     if ( stream_.doConvertBuffer[INPUT] )
8614       bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8615               formatBytes( stream_.deviceFormat[INPUT] );
8617       bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8618               formatBytes( stream_.userFormat );
8620     if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8621       errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8622         pa_strerror( pa_error ) << ".";
8623       errorText_ = errorStream_.str();
8624       error( RtAudioError::WARNING );
// Convert captured device-format data into the user's format/layout.
8626     if ( stream_.doConvertBuffer[INPUT] ) {
8627       convertBuffer( stream_.userBuffer[INPUT],
8628                      stream_.deviceBuffer,
8629                      stream_.convertInfo[INPUT] );
8634   MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a deferred stop request from the callback.
8635   RtApi::tickStreamTime();
8637   if ( doStopStream == 1 )
// Start a stopped PulseAudio stream by marking it running and waking the
// callback thread via the runnable condition variable.
8641 void RtApiPulse::startStream( void )
8643   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8645   if ( stream_.state == STREAM_CLOSED ) {
8646     errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8647     error( RtAudioError::INVALID_USE );
8650   if ( stream_.state == STREAM_RUNNING ) {
8651     errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8652     error( RtAudioError::WARNING );
8656   MUTEX_LOCK( &stream_.mutex );
8658   stream_.state = STREAM_RUNNING;
// Release the callback thread parked in callbackEvent()'s wait loop.
8660   pah->runnable = true;
8661   pthread_cond_signal( &pah->runnable_cv );
8662   MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream gracefully: drain the playback
// connection so queued audio finishes playing, then mark the stream
// stopped (which parks the callback thread on its next iteration).
8665 void RtApiPulse::stopStream( void )
8667   PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8669   if ( stream_.state == STREAM_CLOSED ) {
8670     errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8671     error( RtAudioError::INVALID_USE );
8674   if ( stream_.state == STREAM_STOPPED ) {
8675     errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8676     error( RtAudioError::WARNING );
8680   stream_.state = STREAM_STOPPED;
8681   MUTEX_LOCK( &stream_.mutex );
8683   if ( pah && pah->s_play ) {
// pa_simple_drain blocks until all queued playback data has been played.
8685     if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8686       errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8687         pa_strerror( pa_error ) << ".";
8688       errorText_ = errorStream_.str();
// Unlock before raising so the mutex is not held across error().
8689       MUTEX_UNLOCK( &stream_.mutex );
8690       error( RtAudioError::SYSTEM_ERROR );
8695   stream_.state = STREAM_STOPPED;
8696   MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream immediately: flush (discard) any
// queued playback audio instead of draining it, then mark the stream
// stopped.  Mirrors stopStream otherwise.
8699 void RtApiPulse::abortStream( void )
8701   PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8703   if ( stream_.state == STREAM_CLOSED ) {
8704     errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8705     error( RtAudioError::INVALID_USE );
8708   if ( stream_.state == STREAM_STOPPED ) {
8709     errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8710     error( RtAudioError::WARNING );
8714   stream_.state = STREAM_STOPPED;
8715   MUTEX_LOCK( &stream_.mutex );
8717   if ( pah && pah->s_play ) {
// pa_simple_flush discards queued data — the "abort" semantics.
8719     if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8720       errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8721         pa_strerror( pa_error ) << ".";
8722       errorText_ = errorStream_.str();
// Unlock before raising so the mutex is not held across error().
8723       MUTEX_UNLOCK( &stream_.mutex );
8724       error( RtAudioError::SYSTEM_ERROR );
8729   stream_.state = STREAM_STOPPED;
8730   MUTEX_UNLOCK( &stream_.mutex );
8733 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8734 unsigned int channels, unsigned int firstChannel,
8735 unsigned int sampleRate, RtAudioFormat format,
8736 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8738 PulseAudioHandle *pah = 0;
8739 unsigned long bufferBytes = 0;
8742 if ( device != 0 ) return false;
8743 if ( mode != INPUT && mode != OUTPUT ) return false;
8744 if ( channels != 1 && channels != 2 ) {
8745 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8748 ss.channels = channels;
8750 if ( firstChannel != 0 ) return false;
8752 bool sr_found = false;
8753 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8754 if ( sampleRate == *sr ) {
8756 stream_.sampleRate = sampleRate;
8757 ss.rate = sampleRate;
8762 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8767 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8768 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8769 if ( format == sf->rtaudio_format ) {
8771 stream_.userFormat = sf->rtaudio_format;
8772 stream_.deviceFormat[mode] = stream_.userFormat;
8773 ss.format = sf->pa_format;
8777 if ( !sf_found ) { // Use internal data format conversion.
8778 stream_.userFormat = format;
8779 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8780 ss.format = PA_SAMPLE_FLOAT32LE;
8783 // Set other stream parameters.
8784 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8785 else stream_.userInterleaved = true;
8786 stream_.deviceInterleaved[mode] = true;
8787 stream_.nBuffers = 1;
8788 stream_.doByteSwap[mode] = false;
8789 stream_.nUserChannels[mode] = channels;
8790 stream_.nDeviceChannels[mode] = channels + firstChannel;
8791 stream_.channelOffset[mode] = 0;
8792 std::string streamName = "RtAudio";
8794 // Set flags for buffer conversion.
8795 stream_.doConvertBuffer[mode] = false;
8796 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8797 stream_.doConvertBuffer[mode] = true;
8798 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8799 stream_.doConvertBuffer[mode] = true;
8801 // Allocate necessary internal buffers.
8802 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8803 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8804 if ( stream_.userBuffer[mode] == NULL ) {
8805 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8808 stream_.bufferSize = *bufferSize;
8810 if ( stream_.doConvertBuffer[mode] ) {
8812 bool makeBuffer = true;
8813 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8814 if ( mode == INPUT ) {
8815 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8816 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8817 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8822 bufferBytes *= *bufferSize;
8823 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8824 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8825 if ( stream_.deviceBuffer == NULL ) {
8826 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8832 stream_.device[mode] = device;
8834 // Setup the buffer conversion information structure.
8835 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8837 if ( !stream_.apiHandle ) {
8838 PulseAudioHandle *pah = new PulseAudioHandle;
8840 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8844 stream_.apiHandle = pah;
8845 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8846 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8850 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8853 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8856 pa_buffer_attr buffer_attr;
8857 buffer_attr.fragsize = bufferBytes;
8858 buffer_attr.maxlength = -1;
8860 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8861 if ( !pah->s_rec ) {
8862 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8867 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8868 if ( !pah->s_play ) {
8869 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8877 if ( stream_.mode == UNINITIALIZED )
8878 stream_.mode = mode;
8879 else if ( stream_.mode == mode )
8882 stream_.mode = DUPLEX;
8884 if ( !stream_.callbackInfo.isRunning ) {
8885 stream_.callbackInfo.object = this;
8887 stream_.state = STREAM_STOPPED;
8888 // Set the thread attributes for joinable and realtime scheduling
8889 // priority (optional). The higher priority will only take affect
8890 // if the program is run as root or suid. Note, under Linux
8891 // processes with CAP_SYS_NICE privilege, a user can change
8892 // scheduling policy and priority (thus need not be root). See
8893 // POSIX "capabilities".
8894 pthread_attr_t attr;
8895 pthread_attr_init( &attr );
8896 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8897 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8898 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8899 stream_.callbackInfo.doRealtime = true;
8900 struct sched_param param;
8901 int priority = options->priority;
8902 int min = sched_get_priority_min( SCHED_RR );
8903 int max = sched_get_priority_max( SCHED_RR );
8904 if ( priority < min ) priority = min;
8905 else if ( priority > max ) priority = max;
8906 param.sched_priority = priority;
8908 // Set the policy BEFORE the priority. Otherwise it fails.
8909 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8910 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8911 // This is definitely required. Otherwise it fails.
8912 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8913 pthread_attr_setschedparam(&attr, ¶m);
8916 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8918 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8921 stream_.callbackInfo.isRunning = true;
8922 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8923 pthread_attr_destroy(&attr);
8925 // Failed. Try instead with default attributes.
8926 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8928 stream_.callbackInfo.isRunning = false;
8929 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8938 if ( pah && stream_.callbackInfo.isRunning ) {
8939 pthread_cond_destroy( &pah->runnable_cv );
8941 stream_.apiHandle = 0;
8944 for ( int i=0; i<2; i++ ) {
8945 if ( stream_.userBuffer[i] ) {
8946 free( stream_.userBuffer[i] );
8947 stream_.userBuffer[i] = 0;
8951 if ( stream_.deviceBuffer ) {
8952 free( stream_.deviceBuffer );
8953 stream_.deviceBuffer = 0;
8956 stream_.state = STREAM_CLOSED;
8960 //******************** End of __LINUX_PULSE__ *********************//
8963 #if defined(__LINUX_OSS__)
8966 #include <sys/ioctl.h>
8969 #include <sys/soundcard.h>
8973 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation: one file descriptor per direction, per-direction
// xrun flags, the duplex-trigger state, and the condition variable
// used to park the callback thread while the stream is stopped.
struct OssHandle {
  int id[2];               // device ids (0 = playback, 1 = record)
  bool xrun[2];            // under/overrun flags, reported to the user callback
  bool triggered;          // duplex trigger已 fired? — set once per duplex start
  pthread_cond_t runnable; // signals the callback thread to resume

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
8987 RtApiOss :: RtApiOss()
8989 // Nothing to do here.
8992 RtApiOss :: ~RtApiOss()
8994 if ( stream_.state != STREAM_CLOSED ) closeStream();
8997 unsigned int RtApiOss :: getDeviceCount( void )
8999 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9000 if ( mixerfd == -1 ) {
9001 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9002 error( RtAudioError::WARNING );
9006 oss_sysinfo sysinfo;
9007 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9009 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9010 error( RtAudioError::WARNING );
9015 return sysinfo.numaudios;
9018 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9020 RtAudio::DeviceInfo info;
9021 info.probed = false;
9023 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9024 if ( mixerfd == -1 ) {
9025 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9026 error( RtAudioError::WARNING );
9030 oss_sysinfo sysinfo;
9031 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9032 if ( result == -1 ) {
9034 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9035 error( RtAudioError::WARNING );
9039 unsigned nDevices = sysinfo.numaudios;
9040 if ( nDevices == 0 ) {
9042 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9043 error( RtAudioError::INVALID_USE );
9047 if ( device >= nDevices ) {
9049 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9050 error( RtAudioError::INVALID_USE );
9054 oss_audioinfo ainfo;
9056 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9058 if ( result == -1 ) {
9059 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9060 errorText_ = errorStream_.str();
9061 error( RtAudioError::WARNING );
9066 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9067 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9068 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9069 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9070 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9073 // Probe data formats ... do for input
9074 unsigned long mask = ainfo.iformats;
9075 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9076 info.nativeFormats |= RTAUDIO_SINT16;
9077 if ( mask & AFMT_S8 )
9078 info.nativeFormats |= RTAUDIO_SINT8;
9079 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9080 info.nativeFormats |= RTAUDIO_SINT32;
9082 if ( mask & AFMT_FLOAT )
9083 info.nativeFormats |= RTAUDIO_FLOAT32;
9085 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9086 info.nativeFormats |= RTAUDIO_SINT24;
9088 // Check that we have at least one supported format
9089 if ( info.nativeFormats == 0 ) {
9090 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9091 errorText_ = errorStream_.str();
9092 error( RtAudioError::WARNING );
9096 // Probe the supported sample rates.
9097 info.sampleRates.clear();
9098 if ( ainfo.nrates ) {
9099 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9100 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9101 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9102 info.sampleRates.push_back( SAMPLE_RATES[k] );
9104 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9105 info.preferredSampleRate = SAMPLE_RATES[k];
9113 // Check min and max rate values;
9114 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9115 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9116 info.sampleRates.push_back( SAMPLE_RATES[k] );
9118 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9119 info.preferredSampleRate = SAMPLE_RATES[k];
9124 if ( info.sampleRates.size() == 0 ) {
9125 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9126 errorText_ = errorStream_.str();
9127 error( RtAudioError::WARNING );
9131 info.name = ainfo.name;
9138 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9139 unsigned int firstChannel, unsigned int sampleRate,
9140 RtAudioFormat format, unsigned int *bufferSize,
9141 RtAudio::StreamOptions *options )
9143 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9144 if ( mixerfd == -1 ) {
9145 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9149 oss_sysinfo sysinfo;
9150 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9151 if ( result == -1 ) {
9153 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9157 unsigned nDevices = sysinfo.numaudios;
9158 if ( nDevices == 0 ) {
9159 // This should not happen because a check is made before this function is called.
9161 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9165 if ( device >= nDevices ) {
9166 // This should not happen because a check is made before this function is called.
9168 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9172 oss_audioinfo ainfo;
9174 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9176 if ( result == -1 ) {
9177 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9178 errorText_ = errorStream_.str();
9182 // Check if device supports input or output
9183 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9184 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9185 if ( mode == OUTPUT )
9186 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9188 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9189 errorText_ = errorStream_.str();
9194 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9195 if ( mode == OUTPUT )
9197 else { // mode == INPUT
9198 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9199 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9200 close( handle->id[0] );
9202 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9203 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9204 errorText_ = errorStream_.str();
9207 // Check that the number previously set channels is the same.
9208 if ( stream_.nUserChannels[0] != channels ) {
9209 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9210 errorText_ = errorStream_.str();
9219 // Set exclusive access if specified.
9220 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9222 // Try to open the device.
9224 fd = open( ainfo.devnode, flags, 0 );
9226 if ( errno == EBUSY )
9227 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9229 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9230 errorText_ = errorStream_.str();
9234 // For duplex operation, specifically set this mode (this doesn't seem to work).
9236 if ( flags | O_RDWR ) {
9237 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9238 if ( result == -1) {
9239 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9240 errorText_ = errorStream_.str();
9246 // Check the device channel support.
9247 stream_.nUserChannels[mode] = channels;
9248 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9250 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9251 errorText_ = errorStream_.str();
9255 // Set the number of channels.
9256 int deviceChannels = channels + firstChannel;
9257 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9258 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9260 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9261 errorText_ = errorStream_.str();
9264 stream_.nDeviceChannels[mode] = deviceChannels;
9266 // Get the data format mask
9268 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9269 if ( result == -1 ) {
9271 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9272 errorText_ = errorStream_.str();
9276 // Determine how to set the device format.
9277 stream_.userFormat = format;
9278 int deviceFormat = -1;
9279 stream_.doByteSwap[mode] = false;
9280 if ( format == RTAUDIO_SINT8 ) {
9281 if ( mask & AFMT_S8 ) {
9282 deviceFormat = AFMT_S8;
9283 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9286 else if ( format == RTAUDIO_SINT16 ) {
9287 if ( mask & AFMT_S16_NE ) {
9288 deviceFormat = AFMT_S16_NE;
9289 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9291 else if ( mask & AFMT_S16_OE ) {
9292 deviceFormat = AFMT_S16_OE;
9293 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9294 stream_.doByteSwap[mode] = true;
9297 else if ( format == RTAUDIO_SINT24 ) {
9298 if ( mask & AFMT_S24_NE ) {
9299 deviceFormat = AFMT_S24_NE;
9300 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9302 else if ( mask & AFMT_S24_OE ) {
9303 deviceFormat = AFMT_S24_OE;
9304 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9305 stream_.doByteSwap[mode] = true;
9308 else if ( format == RTAUDIO_SINT32 ) {
9309 if ( mask & AFMT_S32_NE ) {
9310 deviceFormat = AFMT_S32_NE;
9311 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9313 else if ( mask & AFMT_S32_OE ) {
9314 deviceFormat = AFMT_S32_OE;
9315 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9316 stream_.doByteSwap[mode] = true;
9320 if ( deviceFormat == -1 ) {
9321 // The user requested format is not natively supported by the device.
9322 if ( mask & AFMT_S16_NE ) {
9323 deviceFormat = AFMT_S16_NE;
9324 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9326 else if ( mask & AFMT_S32_NE ) {
9327 deviceFormat = AFMT_S32_NE;
9328 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9330 else if ( mask & AFMT_S24_NE ) {
9331 deviceFormat = AFMT_S24_NE;
9332 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9334 else if ( mask & AFMT_S16_OE ) {
9335 deviceFormat = AFMT_S16_OE;
9336 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9337 stream_.doByteSwap[mode] = true;
9339 else if ( mask & AFMT_S32_OE ) {
9340 deviceFormat = AFMT_S32_OE;
9341 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9342 stream_.doByteSwap[mode] = true;
9344 else if ( mask & AFMT_S24_OE ) {
9345 deviceFormat = AFMT_S24_OE;
9346 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9347 stream_.doByteSwap[mode] = true;
9349 else if ( mask & AFMT_S8) {
9350 deviceFormat = AFMT_S8;
9351 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9355 if ( stream_.deviceFormat[mode] == 0 ) {
9356 // This really shouldn't happen ...
9358 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9359 errorText_ = errorStream_.str();
9363 // Set the data format.
9364 int temp = deviceFormat;
9365 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9366 if ( result == -1 || deviceFormat != temp ) {
9368 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9369 errorText_ = errorStream_.str();
9373 // Attempt to set the buffer size. According to OSS, the minimum
9374 // number of buffers is two. The supposed minimum buffer size is 16
9375 // bytes, so that will be our lower bound. The argument to this
9376 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9377 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9378 // We'll check the actual value used near the end of the setup
9380 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9381 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9383 if ( options ) buffers = options->numberOfBuffers;
9384 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9385 if ( buffers < 2 ) buffers = 3;
9386 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9387 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9388 if ( result == -1 ) {
9390 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9391 errorText_ = errorStream_.str();
9394 stream_.nBuffers = buffers;
9396 // Save buffer size (in sample frames).
9397 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9398 stream_.bufferSize = *bufferSize;
9400 // Set the sample rate.
9401 int srate = sampleRate;
9402 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9403 if ( result == -1 ) {
9405 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9406 errorText_ = errorStream_.str();
9410 // Verify the sample rate setup worked.
9411 if ( abs( srate - (int)sampleRate ) > 100 ) {
9413 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9414 errorText_ = errorStream_.str();
9417 stream_.sampleRate = sampleRate;
9419 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9420 // We're doing duplex setup here.
9421 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9422 stream_.nDeviceChannels[0] = deviceChannels;
9425 // Set interleaving parameters.
9426 stream_.userInterleaved = true;
9427 stream_.deviceInterleaved[mode] = true;
9428 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9429 stream_.userInterleaved = false;
9431 // Set flags for buffer conversion
9432 stream_.doConvertBuffer[mode] = false;
9433 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9434 stream_.doConvertBuffer[mode] = true;
9435 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9436 stream_.doConvertBuffer[mode] = true;
9437 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9438 stream_.nUserChannels[mode] > 1 )
9439 stream_.doConvertBuffer[mode] = true;
9441 // Allocate the stream handles if necessary and then save.
9442 if ( stream_.apiHandle == 0 ) {
9444 handle = new OssHandle;
9446 catch ( std::bad_alloc& ) {
9447 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9451 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9452 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9456 stream_.apiHandle = (void *) handle;
9459 handle = (OssHandle *) stream_.apiHandle;
9461 handle->id[mode] = fd;
9463 // Allocate necessary internal buffers.
9464 unsigned long bufferBytes;
9465 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9466 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9467 if ( stream_.userBuffer[mode] == NULL ) {
9468 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9472 if ( stream_.doConvertBuffer[mode] ) {
9474 bool makeBuffer = true;
9475 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9476 if ( mode == INPUT ) {
9477 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9478 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9479 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9484 bufferBytes *= *bufferSize;
9485 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9486 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9487 if ( stream_.deviceBuffer == NULL ) {
9488 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9494 stream_.device[mode] = device;
9495 stream_.state = STREAM_STOPPED;
9497 // Setup the buffer conversion information structure.
9498 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9500 // Setup thread if necessary.
9501 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9502 // We had already set up an output stream.
9503 stream_.mode = DUPLEX;
9504 if ( stream_.device[0] == device ) handle->id[0] = fd;
9507 stream_.mode = mode;
9509 // Setup callback thread.
9510 stream_.callbackInfo.object = (void *) this;
9512 // Set the thread attributes for joinable and realtime scheduling
9513 // priority. The higher priority will only take affect if the
9514 // program is run as root or suid.
9515 pthread_attr_t attr;
9516 pthread_attr_init( &attr );
9517 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9518 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9519 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9520 stream_.callbackInfo.doRealtime = true;
9521 struct sched_param param;
9522 int priority = options->priority;
9523 int min = sched_get_priority_min( SCHED_RR );
9524 int max = sched_get_priority_max( SCHED_RR );
9525 if ( priority < min ) priority = min;
9526 else if ( priority > max ) priority = max;
9527 param.sched_priority = priority;
9529 // Set the policy BEFORE the priority. Otherwise it fails.
9530 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9531 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9532 // This is definitely required. Otherwise it fails.
9533 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9534 pthread_attr_setschedparam(&attr, ¶m);
9537 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9539 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9542 stream_.callbackInfo.isRunning = true;
9543 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9544 pthread_attr_destroy( &attr );
9546 // Failed. Try instead with default attributes.
9547 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9549 stream_.callbackInfo.isRunning = false;
9550 errorText_ = "RtApiOss::error creating callback thread!";
9560 pthread_cond_destroy( &handle->runnable );
9561 if ( handle->id[0] ) close( handle->id[0] );
9562 if ( handle->id[1] ) close( handle->id[1] );
9564 stream_.apiHandle = 0;
9567 for ( int i=0; i<2; i++ ) {
9568 if ( stream_.userBuffer[i] ) {
9569 free( stream_.userBuffer[i] );
9570 stream_.userBuffer[i] = 0;
9574 if ( stream_.deviceBuffer ) {
9575 free( stream_.deviceBuffer );
9576 stream_.deviceBuffer = 0;
9579 stream_.state = STREAM_CLOSED;
9583 void RtApiOss :: closeStream()
9585 if ( stream_.state == STREAM_CLOSED ) {
9586 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9587 error( RtAudioError::WARNING );
9591 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9592 stream_.callbackInfo.isRunning = false;
9593 MUTEX_LOCK( &stream_.mutex );
9594 if ( stream_.state == STREAM_STOPPED )
9595 pthread_cond_signal( &handle->runnable );
9596 MUTEX_UNLOCK( &stream_.mutex );
9597 pthread_join( stream_.callbackInfo.thread, NULL );
9599 if ( stream_.state == STREAM_RUNNING ) {
9600 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9601 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9603 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9604 stream_.state = STREAM_STOPPED;
9608 pthread_cond_destroy( &handle->runnable );
9609 if ( handle->id[0] ) close( handle->id[0] );
9610 if ( handle->id[1] ) close( handle->id[1] );
9612 stream_.apiHandle = 0;
9615 for ( int i=0; i<2; i++ ) {
9616 if ( stream_.userBuffer[i] ) {
9617 free( stream_.userBuffer[i] );
9618 stream_.userBuffer[i] = 0;
9622 if ( stream_.deviceBuffer ) {
9623 free( stream_.deviceBuffer );
9624 stream_.deviceBuffer = 0;
9627 stream_.mode = UNINITIALIZED;
9628 stream_.state = STREAM_CLOSED;
9631 void RtApiOss :: startStream()
9634 if ( stream_.state == STREAM_RUNNING ) {
9635 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9636 error( RtAudioError::WARNING );
9640 MUTEX_LOCK( &stream_.mutex );
9642 stream_.state = STREAM_RUNNING;
9644 // No need to do anything else here ... OSS automatically starts
9645 // when fed samples.
9647 MUTEX_UNLOCK( &stream_.mutex );
9649 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9650 pthread_cond_signal( &handle->runnable );
9653 void RtApiOss :: stopStream()
9656 if ( stream_.state == STREAM_STOPPED ) {
9657 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9658 error( RtAudioError::WARNING );
9662 MUTEX_LOCK( &stream_.mutex );
9664 // The state might change while waiting on a mutex.
9665 if ( stream_.state == STREAM_STOPPED ) {
9666 MUTEX_UNLOCK( &stream_.mutex );
9671 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9672 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9674 // Flush the output with zeros a few times.
9677 RtAudioFormat format;
9679 if ( stream_.doConvertBuffer[0] ) {
9680 buffer = stream_.deviceBuffer;
9681 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9682 format = stream_.deviceFormat[0];
9685 buffer = stream_.userBuffer[0];
9686 samples = stream_.bufferSize * stream_.nUserChannels[0];
9687 format = stream_.userFormat;
9690 memset( buffer, 0, samples * formatBytes(format) );
9691 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9692 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9693 if ( result == -1 ) {
9694 errorText_ = "RtApiOss::stopStream: audio write error.";
9695 error( RtAudioError::WARNING );
9699 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9700 if ( result == -1 ) {
9701 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9702 errorText_ = errorStream_.str();
9705 handle->triggered = false;
9708 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9709 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9710 if ( result == -1 ) {
9711 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9712 errorText_ = errorStream_.str();
9718 stream_.state = STREAM_STOPPED;
9719 MUTEX_UNLOCK( &stream_.mutex );
9721 if ( result != -1 ) return;
9722 error( RtAudioError::SYSTEM_ERROR );
9725 void RtApiOss :: abortStream()
9728 if ( stream_.state == STREAM_STOPPED ) {
9729 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9730 error( RtAudioError::WARNING );
9734 MUTEX_LOCK( &stream_.mutex );
9736 // The state might change while waiting on a mutex.
9737 if ( stream_.state == STREAM_STOPPED ) {
9738 MUTEX_UNLOCK( &stream_.mutex );
9743 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9744 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9745 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9746 if ( result == -1 ) {
9747 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9748 errorText_ = errorStream_.str();
9751 handle->triggered = false;
9754 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9755 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9756 if ( result == -1 ) {
9757 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9758 errorText_ = errorStream_.str();
9764 stream_.state = STREAM_STOPPED;
9765 MUTEX_UNLOCK( &stream_.mutex );
9767 if ( result != -1 ) return;
9768 error( RtAudioError::SYSTEM_ERROR );
// Per-buffer processing routine, executed repeatedly by the OSS
// callback thread (see ossCallbackHandler): waits while the stream is
// stopped, invokes the user callback, then writes output to and/or
// reads input from the device, converting/byte-swapping as configured.
// NOTE(review): this listing appears to have dropped lines during
// extraction (closing braces, `else` keywords, and declarations such
// as `int result; char *buffer; int samples;` and `int trig = 0;`) —
// confirm against the canonical file before editing.
9771 void RtApiOss :: callbackEvent()
// While stopped, park on the handle's condition variable until
// startStream()/closeStream() signals; bail out if not set RUNNING.
9773 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9774 if ( stream_.state == STREAM_STOPPED ) {
9775 MUTEX_LOCK( &stream_.mutex );
9776 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9777 if ( stream_.state != STREAM_RUNNING ) {
9778 MUTEX_UNLOCK( &stream_.mutex );
9781 MUTEX_UNLOCK( &stream_.mutex );
9784 if ( stream_.state == STREAM_CLOSED ) {
9785 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9786 error( RtAudioError::WARNING );
9790 // Invoke user callback to get fresh output data.
9791 int doStopStream = 0;
9792 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9793 double streamTime = getStreamTime();
// Report (and clear) any xrun flags recorded by previous read/write
// failures so the user callback sees them exactly once.
9794 RtAudioStreamStatus status = 0;
9795 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9796 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9797 handle->xrun[0] = false;
9799 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9800 status |= RTAUDIO_INPUT_OVERFLOW;
9801 handle->xrun[1] = false;
9803 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9804 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return value 2 requests an immediate abort.
9805 if ( doStopStream == 2 ) {
9806 this->abortStream();
9810 MUTEX_LOCK( &stream_.mutex );
9812 // The state might change while waiting on a mutex.
9813 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9818 RtAudioFormat format;
// --- Output (playback) half ---
9820 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9822 // Setup parameters and do buffer conversion if necessary.
9823 if ( stream_.doConvertBuffer[0] ) {
9824 buffer = stream_.deviceBuffer;
9825 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9826 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9827 format = stream_.deviceFormat[0];
9830 buffer = stream_.userBuffer[0];
9831 samples = stream_.bufferSize * stream_.nUserChannels[0];
9832 format = stream_.userFormat;
9835 // Do byte swapping if necessary.
9836 if ( stream_.doByteSwap[0] )
9837 byteSwapBuffer( buffer, samples, format );
// On the first duplex buffer, prime the device with SETTRIGGER so
// input and output start in sync (write once while disabled, then
// enable both directions).
9839 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9841 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9842 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9843 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9844 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9845 handle->triggered = true;
9848 // Write samples to device.
9849 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9851 if ( result == -1 ) {
9852 // We'll assume this is an underrun, though there isn't a
9853 // specific means for determining that.
9854 handle->xrun[0] = true;
9855 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9856 error( RtAudioError::WARNING );
9857 // Continue on to input section.
// --- Input (capture) half ---
9861 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9863 // Setup parameters.
9864 if ( stream_.doConvertBuffer[1] ) {
9865 buffer = stream_.deviceBuffer;
9866 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9867 format = stream_.deviceFormat[1];
9870 buffer = stream_.userBuffer[1];
9871 samples = stream_.bufferSize * stream_.nUserChannels[1];
9872 format = stream_.userFormat;
9875 // Read samples from device.
9876 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9878 if ( result == -1 ) {
9879 // We'll assume this is an overrun, though there isn't a
9880 // specific means for determining that.
9881 handle->xrun[1] = true;
9882 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9883 error( RtAudioError::WARNING );
9887 // Do byte swapping if necessary.
9888 if ( stream_.doByteSwap[1] )
9889 byteSwapBuffer( buffer, samples, format );
9891 // Do buffer conversion if necessary.
9892 if ( stream_.doConvertBuffer[1] )
9893 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9897 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; callback return value 1 requests a
// graceful stop (drain) after this buffer.
9899 RtApi::tickStreamTime();
9900 if ( doStopStream == 1 ) this->stopStream();
9903 static void *ossCallbackHandler( void *ptr )
9905 CallbackInfo *info = (CallbackInfo *) ptr;
9906 RtApiOss *object = (RtApiOss *) info->object;
9907 bool *isRunning = &info->isRunning;
9909 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9910 if (info->doRealtime) {
9911 std::cerr << "RtAudio oss: " <<
9912 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9913 "running realtime scheduling" << std::endl;
9917 while ( *isRunning == true ) {
9918 pthread_testcancel();
9919 object->callbackEvent();
9922 pthread_exit( NULL );
9925 //******************** End of __LINUX_OSS__ *********************//
9929 // *************************************************** //
9931 // Protected common (OS-independent) RtAudio methods.
9933 // *************************************************** //
9935 // This method can be modified to control the behavior of error
9936 // message printing.
9937 void RtApi :: error( RtAudioError::Type type )
9939 errorStream_.str(""); // clear the ostringstream
9941 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9942 if ( errorCallback ) {
9943 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9945 if ( firstErrorOccurred_ )
9948 firstErrorOccurred_ = true;
9949 const std::string errorMessage = errorText_;
9951 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9952 stream_.callbackInfo.isRunning = false; // exit from the thread
9956 errorCallback( type, errorMessage );
9957 firstErrorOccurred_ = false;
9961 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9962 std::cerr << '\n' << errorText_ << "\n\n";
9963 else if ( type != RtAudioError::WARNING )
9964 throw( RtAudioError( errorText_, type ) );
9967 void RtApi :: verifyStream()
9969 if ( stream_.state == STREAM_CLOSED ) {
9970 errorText_ = "RtApi:: a stream is not open!";
9971 error( RtAudioError::INVALID_USE );
9975 void RtApi :: clearStreamInfo()
9977 stream_.mode = UNINITIALIZED;
9978 stream_.state = STREAM_CLOSED;
9979 stream_.sampleRate = 0;
9980 stream_.bufferSize = 0;
9981 stream_.nBuffers = 0;
9982 stream_.userFormat = 0;
9983 stream_.userInterleaved = true;
9984 stream_.streamTime = 0.0;
9985 stream_.apiHandle = 0;
9986 stream_.deviceBuffer = 0;
9987 stream_.callbackInfo.callback = 0;
9988 stream_.callbackInfo.userData = 0;
9989 stream_.callbackInfo.isRunning = false;
9990 stream_.callbackInfo.errorCallback = 0;
9991 for ( int i=0; i<2; i++ ) {
9992 stream_.device[i] = 11111;
9993 stream_.doConvertBuffer[i] = false;
9994 stream_.deviceInterleaved[i] = true;
9995 stream_.doByteSwap[i] = false;
9996 stream_.nUserChannels[i] = 0;
9997 stream_.nDeviceChannels[i] = 0;
9998 stream_.channelOffset[i] = 0;
9999 stream_.deviceFormat[i] = 0;
10000 stream_.latency[i] = 0;
10001 stream_.userBuffer[i] = 0;
10002 stream_.convertInfo[i].channels = 0;
10003 stream_.convertInfo[i].inJump = 0;
10004 stream_.convertInfo[i].outJump = 0;
10005 stream_.convertInfo[i].inFormat = 0;
10006 stream_.convertInfo[i].outFormat = 0;
10007 stream_.convertInfo[i].inOffset.clear();
10008 stream_.convertInfo[i].outOffset.clear();
10012 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10014 if ( format == RTAUDIO_SINT16 )
10016 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10018 else if ( format == RTAUDIO_FLOAT64 )
10020 else if ( format == RTAUDIO_SINT24 )
10022 else if ( format == RTAUDIO_SINT8 )
10025 errorText_ = "RtApi::formatBytes: undefined format.";
10026 error( RtAudioError::WARNING );
10031 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10033 if ( mode == INPUT ) { // convert device to user buffer
10034 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10035 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10036 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10037 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10039 else { // convert user to device buffer
10040 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10041 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10042 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10043 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10046 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10047 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10049 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10051 // Set up the interleave/deinterleave offsets.
10052 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10053 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10054 ( mode == INPUT && stream_.userInterleaved ) ) {
10055 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10056 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10057 stream_.convertInfo[mode].outOffset.push_back( k );
10058 stream_.convertInfo[mode].inJump = 1;
10062 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10063 stream_.convertInfo[mode].inOffset.push_back( k );
10064 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10065 stream_.convertInfo[mode].outJump = 1;
10069 else { // no (de)interleaving
10070 if ( stream_.userInterleaved ) {
10071 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10072 stream_.convertInfo[mode].inOffset.push_back( k );
10073 stream_.convertInfo[mode].outOffset.push_back( k );
10077 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10078 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10079 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10080 stream_.convertInfo[mode].inJump = 1;
10081 stream_.convertInfo[mode].outJump = 1;
10086 // Add channel offset.
10087 if ( firstChannel > 0 ) {
10088 if ( stream_.deviceInterleaved[mode] ) {
10089 if ( mode == OUTPUT ) {
10090 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10091 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10094 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10095 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10099 if ( mode == OUTPUT ) {
10100 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10101 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10104 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10105 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10111 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10113 // This function does format conversion, input/output channel compensation, and
10114 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10115 // the lower three bytes of a 32-bit integer.
10117 // Clear our device buffer when in/out duplex device channels are different
10118 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10119 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10120 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10123 if (info.outFormat == RTAUDIO_FLOAT64) {
10125 Float64 *out = (Float64 *)outBuffer;
10127 if (info.inFormat == RTAUDIO_SINT8) {
10128 signed char *in = (signed char *)inBuffer;
10129 scale = 1.0 / 127.5;
10130 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10131 for (j=0; j<info.channels; j++) {
10132 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10133 out[info.outOffset[j]] += 0.5;
10134 out[info.outOffset[j]] *= scale;
10137 out += info.outJump;
10140 else if (info.inFormat == RTAUDIO_SINT16) {
10141 Int16 *in = (Int16 *)inBuffer;
10142 scale = 1.0 / 32767.5;
10143 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10144 for (j=0; j<info.channels; j++) {
10145 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10146 out[info.outOffset[j]] += 0.5;
10147 out[info.outOffset[j]] *= scale;
10150 out += info.outJump;
10153 else if (info.inFormat == RTAUDIO_SINT24) {
10154 Int24 *in = (Int24 *)inBuffer;
10155 scale = 1.0 / 8388607.5;
10156 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10157 for (j=0; j<info.channels; j++) {
10158 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10159 out[info.outOffset[j]] += 0.5;
10160 out[info.outOffset[j]] *= scale;
10163 out += info.outJump;
10166 else if (info.inFormat == RTAUDIO_SINT32) {
10167 Int32 *in = (Int32 *)inBuffer;
10168 scale = 1.0 / 2147483647.5;
10169 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10170 for (j=0; j<info.channels; j++) {
10171 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10172 out[info.outOffset[j]] += 0.5;
10173 out[info.outOffset[j]] *= scale;
10176 out += info.outJump;
10179 else if (info.inFormat == RTAUDIO_FLOAT32) {
10180 Float32 *in = (Float32 *)inBuffer;
10181 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10182 for (j=0; j<info.channels; j++) {
10183 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10186 out += info.outJump;
10189 else if (info.inFormat == RTAUDIO_FLOAT64) {
10190 // Channel compensation and/or (de)interleaving only.
10191 Float64 *in = (Float64 *)inBuffer;
10192 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10193 for (j=0; j<info.channels; j++) {
10194 out[info.outOffset[j]] = in[info.inOffset[j]];
10197 out += info.outJump;
10201 else if (info.outFormat == RTAUDIO_FLOAT32) {
10203 Float32 *out = (Float32 *)outBuffer;
10205 if (info.inFormat == RTAUDIO_SINT8) {
10206 signed char *in = (signed char *)inBuffer;
10207 scale = (Float32) ( 1.0 / 127.5 );
10208 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10209 for (j=0; j<info.channels; j++) {
10210 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10211 out[info.outOffset[j]] += 0.5;
10212 out[info.outOffset[j]] *= scale;
10215 out += info.outJump;
10218 else if (info.inFormat == RTAUDIO_SINT16) {
10219 Int16 *in = (Int16 *)inBuffer;
10220 scale = (Float32) ( 1.0 / 32767.5 );
10221 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10222 for (j=0; j<info.channels; j++) {
10223 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10224 out[info.outOffset[j]] += 0.5;
10225 out[info.outOffset[j]] *= scale;
10228 out += info.outJump;
10231 else if (info.inFormat == RTAUDIO_SINT24) {
10232 Int24 *in = (Int24 *)inBuffer;
10233 scale = (Float32) ( 1.0 / 8388607.5 );
10234 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10235 for (j=0; j<info.channels; j++) {
10236 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10237 out[info.outOffset[j]] += 0.5;
10238 out[info.outOffset[j]] *= scale;
10241 out += info.outJump;
10244 else if (info.inFormat == RTAUDIO_SINT32) {
10245 Int32 *in = (Int32 *)inBuffer;
10246 scale = (Float32) ( 1.0 / 2147483647.5 );
10247 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10248 for (j=0; j<info.channels; j++) {
10249 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10250 out[info.outOffset[j]] += 0.5;
10251 out[info.outOffset[j]] *= scale;
10254 out += info.outJump;
10257 else if (info.inFormat == RTAUDIO_FLOAT32) {
10258 // Channel compensation and/or (de)interleaving only.
10259 Float32 *in = (Float32 *)inBuffer;
10260 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10261 for (j=0; j<info.channels; j++) {
10262 out[info.outOffset[j]] = in[info.inOffset[j]];
10265 out += info.outJump;
10268 else if (info.inFormat == RTAUDIO_FLOAT64) {
10269 Float64 *in = (Float64 *)inBuffer;
10270 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10271 for (j=0; j<info.channels; j++) {
10272 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10275 out += info.outJump;
10279 else if (info.outFormat == RTAUDIO_SINT32) {
10280 Int32 *out = (Int32 *)outBuffer;
10281 if (info.inFormat == RTAUDIO_SINT8) {
10282 signed char *in = (signed char *)inBuffer;
10283 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10284 for (j=0; j<info.channels; j++) {
10285 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10286 out[info.outOffset[j]] <<= 24;
10289 out += info.outJump;
10292 else if (info.inFormat == RTAUDIO_SINT16) {
10293 Int16 *in = (Int16 *)inBuffer;
10294 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10295 for (j=0; j<info.channels; j++) {
10296 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10297 out[info.outOffset[j]] <<= 16;
10300 out += info.outJump;
10303 else if (info.inFormat == RTAUDIO_SINT24) {
10304 Int24 *in = (Int24 *)inBuffer;
10305 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10306 for (j=0; j<info.channels; j++) {
10307 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10308 out[info.outOffset[j]] <<= 8;
10311 out += info.outJump;
10314 else if (info.inFormat == RTAUDIO_SINT32) {
10315 // Channel compensation and/or (de)interleaving only.
10316 Int32 *in = (Int32 *)inBuffer;
10317 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10318 for (j=0; j<info.channels; j++) {
10319 out[info.outOffset[j]] = in[info.inOffset[j]];
10322 out += info.outJump;
10325 else if (info.inFormat == RTAUDIO_FLOAT32) {
10326 Float32 *in = (Float32 *)inBuffer;
10327 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10328 for (j=0; j<info.channels; j++) {
10329 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10332 out += info.outJump;
10335 else if (info.inFormat == RTAUDIO_FLOAT64) {
10336 Float64 *in = (Float64 *)inBuffer;
10337 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10338 for (j=0; j<info.channels; j++) {
10339 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10342 out += info.outJump;
10346 else if (info.outFormat == RTAUDIO_SINT24) {
10347 Int24 *out = (Int24 *)outBuffer;
10348 if (info.inFormat == RTAUDIO_SINT8) {
10349 signed char *in = (signed char *)inBuffer;
10350 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10351 for (j=0; j<info.channels; j++) {
10352 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10353 //out[info.outOffset[j]] <<= 16;
10356 out += info.outJump;
10359 else if (info.inFormat == RTAUDIO_SINT16) {
10360 Int16 *in = (Int16 *)inBuffer;
10361 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10362 for (j=0; j<info.channels; j++) {
10363 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10364 //out[info.outOffset[j]] <<= 8;
10367 out += info.outJump;
10370 else if (info.inFormat == RTAUDIO_SINT24) {
10371 // Channel compensation and/or (de)interleaving only.
10372 Int24 *in = (Int24 *)inBuffer;
10373 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10374 for (j=0; j<info.channels; j++) {
10375 out[info.outOffset[j]] = in[info.inOffset[j]];
10378 out += info.outJump;
10381 else if (info.inFormat == RTAUDIO_SINT32) {
10382 Int32 *in = (Int32 *)inBuffer;
10383 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10384 for (j=0; j<info.channels; j++) {
10385 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10386 //out[info.outOffset[j]] >>= 8;
10389 out += info.outJump;
10392 else if (info.inFormat == RTAUDIO_FLOAT32) {
10393 Float32 *in = (Float32 *)inBuffer;
10394 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10395 for (j=0; j<info.channels; j++) {
10396 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10399 out += info.outJump;
10402 else if (info.inFormat == RTAUDIO_FLOAT64) {
10403 Float64 *in = (Float64 *)inBuffer;
10404 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10405 for (j=0; j<info.channels; j++) {
10406 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10409 out += info.outJump;
10413 else if (info.outFormat == RTAUDIO_SINT16) {
10414 Int16 *out = (Int16 *)outBuffer;
10415 if (info.inFormat == RTAUDIO_SINT8) {
10416 signed char *in = (signed char *)inBuffer;
10417 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10418 for (j=0; j<info.channels; j++) {
10419 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10420 out[info.outOffset[j]] <<= 8;
10423 out += info.outJump;
10426 else if (info.inFormat == RTAUDIO_SINT16) {
10427 // Channel compensation and/or (de)interleaving only.
10428 Int16 *in = (Int16 *)inBuffer;
10429 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10430 for (j=0; j<info.channels; j++) {
10431 out[info.outOffset[j]] = in[info.inOffset[j]];
10434 out += info.outJump;
10437 else if (info.inFormat == RTAUDIO_SINT24) {
10438 Int24 *in = (Int24 *)inBuffer;
10439 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10440 for (j=0; j<info.channels; j++) {
10441 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10444 out += info.outJump;
10447 else if (info.inFormat == RTAUDIO_SINT32) {
10448 Int32 *in = (Int32 *)inBuffer;
10449 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10450 for (j=0; j<info.channels; j++) {
10451 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10454 out += info.outJump;
10457 else if (info.inFormat == RTAUDIO_FLOAT32) {
10458 Float32 *in = (Float32 *)inBuffer;
10459 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10460 for (j=0; j<info.channels; j++) {
10461 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10464 out += info.outJump;
10467 else if (info.inFormat == RTAUDIO_FLOAT64) {
10468 Float64 *in = (Float64 *)inBuffer;
10469 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10470 for (j=0; j<info.channels; j++) {
10471 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10474 out += info.outJump;
10478 else if (info.outFormat == RTAUDIO_SINT8) {
10479 signed char *out = (signed char *)outBuffer;
10480 if (info.inFormat == RTAUDIO_SINT8) {
10481 // Channel compensation and/or (de)interleaving only.
10482 signed char *in = (signed char *)inBuffer;
10483 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10484 for (j=0; j<info.channels; j++) {
10485 out[info.outOffset[j]] = in[info.inOffset[j]];
10488 out += info.outJump;
10491 if (info.inFormat == RTAUDIO_SINT16) {
10492 Int16 *in = (Int16 *)inBuffer;
10493 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10494 for (j=0; j<info.channels; j++) {
10495 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10498 out += info.outJump;
10501 else if (info.inFormat == RTAUDIO_SINT24) {
10502 Int24 *in = (Int24 *)inBuffer;
10503 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10504 for (j=0; j<info.channels; j++) {
10505 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10508 out += info.outJump;
10511 else if (info.inFormat == RTAUDIO_SINT32) {
10512 Int32 *in = (Int32 *)inBuffer;
10513 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10514 for (j=0; j<info.channels; j++) {
10515 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10518 out += info.outJump;
10521 else if (info.inFormat == RTAUDIO_FLOAT32) {
10522 Float32 *in = (Float32 *)inBuffer;
10523 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10524 for (j=0; j<info.channels; j++) {
10525 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10528 out += info.outJump;
10531 else if (info.inFormat == RTAUDIO_FLOAT64) {
10532 Float64 *in = (Float64 *)inBuffer;
10533 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10534 for (j=0; j<info.channels; j++) {
10535 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10538 out += info.outJump;
10544 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10545 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10546 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10548 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10554 if ( format == RTAUDIO_SINT16 ) {
10555 for ( unsigned int i=0; i<samples; i++ ) {
10556 // Swap 1st and 2nd bytes.
10561 // Increment 2 bytes.
10565 else if ( format == RTAUDIO_SINT32 ||
10566 format == RTAUDIO_FLOAT32 ) {
10567 for ( unsigned int i=0; i<samples; i++ ) {
10568 // Swap 1st and 4th bytes.
10573 // Swap 2nd and 3rd bytes.
10579 // Increment 3 more bytes.
10583 else if ( format == RTAUDIO_SINT24 ) {
10584 for ( unsigned int i=0; i<samples; i++ ) {
10585 // Swap 1st and 3rd bytes.
10590 // Increment 2 more bytes.
10594 else if ( format == RTAUDIO_FLOAT64 ) {
10595 for ( unsigned int i=0; i<samples; i++ ) {
10596 // Swap 1st and 8th bytes
10601 // Swap 2nd and 7th bytes
10607 // Swap 3rd and 6th bytes
10613 // Swap 4th and 5th bytes
10619 // Increment 5 more bytes.
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2