1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
52 // Static variable definitions.
53 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
54 const unsigned int RtApi::SAMPLE_RATES[] = {
55 4000, 5512, 8000, 9600, 11025, 16000, 22050,
56 32000, 44100, 48000, 88200, 96000, 176400, 192000
59 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
60 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
61 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
62 #define MUTEX_LOCK(A) EnterCriticalSection(A)
63 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
67 static std::string convertCharPointerToStdString(const char *text)
69 return std::string(text);
72 static std::string convertCharPointerToStdString(const wchar_t *text)
74 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
75 std::string s( length-1, '\0' );
76 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
80 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
82 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
83 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
84 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
85 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
87 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
88 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
91 // *************************************************** //
93 // RtAudio definitions.
95 // *************************************************** //
97 std::string RtAudio :: getVersion( void )
99 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum (UNSPECIFIED, LINUX_ALSA, LINUX_PULSE,
// LINUX_OSS, UNIX_JACK, MACOSX_CORE, WINDOWS_WASAPI, WINDOWS_ASIO,
// WINDOWS_DS, RTAUDIO_DUMMY).
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa"        , "ALSA" },
  { "pulse"       , "Pulse" },
  { "oss"         , "OpenSoundSystem" },
  { "jack"        , "Jack" },
  { "core"        , "CoreAudio" },
  { "wasapi"      , "WASAPI" },
  { "asio"        , "ASIO" },
  { "ds"          , "DirectSound" },
  { "dummy"       , "Dummy" },
};

const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
120 // The order here will control the order of RtAudio's API search in
122 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
123 #if defined(__UNIX_JACK__)
126 #if defined(__LINUX_PULSE__)
127 RtAudio::LINUX_PULSE,
129 #if defined(__LINUX_ALSA__)
132 #if defined(__LINUX_OSS__)
135 #if defined(__WINDOWS_ASIO__)
136 RtAudio::WINDOWS_ASIO,
138 #if defined(__WINDOWS_WASAPI__)
139 RtAudio::WINDOWS_WASAPI,
141 #if defined(__WINDOWS_DS__)
144 #if defined(__MACOSX_CORE__)
145 RtAudio::MACOSX_CORE,
147 #if defined(__RTAUDIO_DUMMY__)
148 RtAudio::RTAUDIO_DUMMY,
150 RtAudio::UNSPECIFIED,
152 extern "C" const unsigned int rtaudio_num_compiled_apis =
153 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
156 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
157 // If the build breaks here, check that they match.
158 template<bool b> class StaticAssert { private: StaticAssert() {} };
159 template<> class StaticAssert<true>{ public: StaticAssert() {} };
160 class StaticAssertions { StaticAssertions() {
161 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
164 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
166 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
167 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
170 std::string RtAudio :: getApiName( RtAudio::Api api )
172 if (api < 0 || api >= RtAudio::NUM_APIS)
174 return rtaudio_api_names[api][0];
177 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
179 if (api < 0 || api >= RtAudio::NUM_APIS)
181 return rtaudio_api_names[api][1];
184 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
187 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
188 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
189 return rtaudio_compiled_apis[i];
190 return RtAudio::UNSPECIFIED;
193 void RtAudio :: openRtApi( RtAudio::Api api )
199 #if defined(__UNIX_JACK__)
200 if ( api == UNIX_JACK )
201 rtapi_ = new RtApiJack();
203 #if defined(__LINUX_ALSA__)
204 if ( api == LINUX_ALSA )
205 rtapi_ = new RtApiAlsa();
207 #if defined(__LINUX_PULSE__)
208 if ( api == LINUX_PULSE )
209 rtapi_ = new RtApiPulse();
211 #if defined(__LINUX_OSS__)
212 if ( api == LINUX_OSS )
213 rtapi_ = new RtApiOss();
215 #if defined(__WINDOWS_ASIO__)
216 if ( api == WINDOWS_ASIO )
217 rtapi_ = new RtApiAsio();
219 #if defined(__WINDOWS_WASAPI__)
220 if ( api == WINDOWS_WASAPI )
221 rtapi_ = new RtApiWasapi();
223 #if defined(__WINDOWS_DS__)
224 if ( api == WINDOWS_DS )
225 rtapi_ = new RtApiDs();
227 #if defined(__MACOSX_CORE__)
228 if ( api == MACOSX_CORE )
229 rtapi_ = new RtApiCore();
231 #if defined(__RTAUDIO_DUMMY__)
232 if ( api == RTAUDIO_DUMMY )
233 rtapi_ = new RtApiDummy();
237 RtAudio :: RtAudio( RtAudio::Api api )
241 if ( api != UNSPECIFIED ) {
242 // Attempt to open the specified API.
244 if ( rtapi_ ) return;
246 // No compiled support for specified API value. Issue a debug
247 // warning and continue as if no API was specified.
248 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
251 // Iterate through the compiled APIs and return as soon as we find
252 // one with at least one device or we reach the end of the list.
253 std::vector< RtAudio::Api > apis;
254 getCompiledApi( apis );
255 for ( unsigned int i=0; i<apis.size(); i++ ) {
256 openRtApi( apis[i] );
257 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
260 if ( rtapi_ ) return;
262 // It should not be possible to get here because the preprocessor
263 // definition __RTAUDIO_DUMMY__ is automatically defined if no
264 // API-specific definitions are passed to the compiler. But just in
265 // case something weird happens, we'll thow an error.
266 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
267 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
270 RtAudio :: ~RtAudio()
276 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
277 RtAudio::StreamParameters *inputParameters,
278 RtAudioFormat format, unsigned int sampleRate,
279 unsigned int *bufferFrames,
280 RtAudioCallback callback, void *userData,
281 RtAudio::StreamOptions *options,
282 RtAudioErrorCallback errorCallback )
284 return rtapi_->openStream( outputParameters, inputParameters, format,
285 sampleRate, bufferFrames, callback,
286 userData, options, errorCallback );
289 // *************************************************** //
291 // Public RtApi definitions (see end of file for
292 // private or protected utility functions).
294 // *************************************************** //
298 stream_.state = STREAM_CLOSED;
299 stream_.mode = UNINITIALIZED;
300 stream_.apiHandle = 0;
301 stream_.userBuffer[0] = 0;
302 stream_.userBuffer[1] = 0;
303 MUTEX_INITIALIZE( &stream_.mutex );
304 showWarnings_ = true;
305 firstErrorOccurred_ = false;
310 MUTEX_DESTROY( &stream_.mutex );
313 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
314 RtAudio::StreamParameters *iParams,
315 RtAudioFormat format, unsigned int sampleRate,
316 unsigned int *bufferFrames,
317 RtAudioCallback callback, void *userData,
318 RtAudio::StreamOptions *options,
319 RtAudioErrorCallback errorCallback )
321 if ( stream_.state != STREAM_CLOSED ) {
322 errorText_ = "RtApi::openStream: a stream is already open!";
323 error( RtAudioError::INVALID_USE );
327 // Clear stream information potentially left from a previously open stream.
330 if ( oParams && oParams->nChannels < 1 ) {
331 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
332 error( RtAudioError::INVALID_USE );
336 if ( iParams && iParams->nChannels < 1 ) {
337 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
338 error( RtAudioError::INVALID_USE );
342 if ( oParams == NULL && iParams == NULL ) {
343 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
344 error( RtAudioError::INVALID_USE );
348 if ( formatBytes(format) == 0 ) {
349 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
350 error( RtAudioError::INVALID_USE );
354 unsigned int nDevices = getDeviceCount();
355 unsigned int oChannels = 0;
357 oChannels = oParams->nChannels;
358 if ( oParams->deviceId >= nDevices ) {
359 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
360 error( RtAudioError::INVALID_USE );
365 unsigned int iChannels = 0;
367 iChannels = iParams->nChannels;
368 if ( iParams->deviceId >= nDevices ) {
369 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
370 error( RtAudioError::INVALID_USE );
377 if ( oChannels > 0 ) {
379 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
380 sampleRate, format, bufferFrames, options );
381 if ( result == false ) {
382 error( RtAudioError::SYSTEM_ERROR );
387 if ( iChannels > 0 ) {
389 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
390 sampleRate, format, bufferFrames, options );
391 if ( result == false ) {
392 if ( oChannels > 0 ) closeStream();
393 error( RtAudioError::SYSTEM_ERROR );
398 stream_.callbackInfo.callback = (void *) callback;
399 stream_.callbackInfo.userData = userData;
400 stream_.callbackInfo.errorCallback = (void *) errorCallback;
402 if ( options ) options->numberOfBuffers = stream_.nBuffers;
403 stream_.state = STREAM_STOPPED;
406 unsigned int RtApi :: getDefaultInputDevice( void )
408 // Should be implemented in subclasses if possible.
412 unsigned int RtApi :: getDefaultOutputDevice( void )
414 // Should be implemented in subclasses if possible.
418 void RtApi :: closeStream( void )
420 // MUST be implemented in subclasses!
424 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
425 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
426 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
427 RtAudio::StreamOptions * /*options*/ )
429 // MUST be implemented in subclasses!
433 void RtApi :: tickStreamTime( void )
435 // Subclasses that do not provide their own implementation of
436 // getStreamTime should call this function once per buffer I/O to
437 // provide basic stream time support.
439 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
441 #if defined( HAVE_GETTIMEOFDAY )
442 gettimeofday( &stream_.lastTickTimestamp, NULL );
446 long RtApi :: getStreamLatency( void )
450 long totalLatency = 0;
451 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
452 totalLatency = stream_.latency[0];
453 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
454 totalLatency += stream_.latency[1];
459 double RtApi :: getStreamTime( void )
463 #if defined( HAVE_GETTIMEOFDAY )
464 // Return a very accurate estimate of the stream time by
465 // adding in the elapsed time since the last tick.
469 if ( stream_.state != STREAM_RUNNING || (stream_.lastTickTimestamp.tv_sec == 0 && stream_.lastTickTimestamp.tv_usec == 0) )
470 return stream_.streamTime;
472 gettimeofday( &now, NULL );
473 then = stream_.lastTickTimestamp;
474 return stream_.streamTime +
475 ((now.tv_sec + 0.000001 * now.tv_usec) -
476 (then.tv_sec + 0.000001 * then.tv_usec));
478 return stream_.streamTime;
482 void RtApi :: setStreamTime( double time )
487 stream_.streamTime = time;
488 #if defined( HAVE_GETTIMEOFDAY )
489 gettimeofday( &stream_.lastTickTimestamp, NULL );
493 unsigned int RtApi :: getStreamSampleRate( void )
497 return stream_.sampleRate;
500 void RtApi :: startStream( void )
502 #if defined( HAVE_GETTIMEOFDAY )
503 stream_.lastTickTimestamp.tv_sec = 0;
504 stream_.lastTickTimestamp.tv_usec = 0;
509 // *************************************************** //
511 // OS/API-specific methods.
513 // *************************************************** //
515 #if defined(__MACOSX_CORE__)
517 // The OS X CoreAudio API is designed to use a separate callback
518 // procedure for each of its audio devices. A single RtAudio duplex
519 // stream using two different devices is supported here, though it
520 // cannot be guaranteed to always behave correctly because we cannot
521 // synchronize these two callbacks.
523 // A property listener is installed for over/underrun information.
524 // However, no functionality is currently provided to allow property
525 // listeners to trigger user handlers because it is unclear what could
526 // be done if a critical stream parameter (buffer size, sample rate,
527 // device disconnect) notification arrived. The listeners entail
528 // quite a bit of extra code and most likely, a user program wouldn't
529 // be prepared for the result anyway. However, we do provide a flag
530 // to the client callback function to inform of an over/underrun.
532 // A structure to hold various information related to the CoreAudio API
535 AudioDeviceID id[2]; // device ids
536 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
537 AudioDeviceIOProcID procId[2];
539 UInt32 iStream[2]; // device stream index (or first if using multiple)
540 UInt32 nStreams[2]; // number of streams to use
543 pthread_cond_t condition;
544 int drainCounter; // Tracks callback counts when draining
545 bool internalDrain; // Indicates if stop is initiated from callback or not.
548 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
551 RtApiCore:: RtApiCore()
553 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
554 // This is a largely undocumented but absolutely necessary
555 // requirement starting with OS-X 10.6. If not called, queries and
556 // updates to various audio device properties are not handled
558 CFRunLoopRef theRunLoop = NULL;
559 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
560 kAudioObjectPropertyScopeGlobal,
561 kAudioObjectPropertyElementMaster };
562 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
563 if ( result != noErr ) {
564 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
565 error( RtAudioError::WARNING );
570 RtApiCore :: ~RtApiCore()
572 // The subclass destructor gets called before the base class
573 // destructor, so close an existing stream before deallocating
574 // apiDeviceId memory.
575 if ( stream_.state != STREAM_CLOSED ) closeStream();
578 unsigned int RtApiCore :: getDeviceCount( void )
580 // Find out how many audio devices there are, if any.
582 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
583 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
584 if ( result != noErr ) {
585 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
586 error( RtAudioError::WARNING );
590 return dataSize / sizeof( AudioDeviceID );
593 unsigned int RtApiCore :: getDefaultInputDevice( void )
595 unsigned int nDevices = getDeviceCount();
596 if ( nDevices <= 1 ) return 0;
599 UInt32 dataSize = sizeof( AudioDeviceID );
600 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
601 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
602 if ( result != noErr ) {
603 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
604 error( RtAudioError::WARNING );
608 dataSize *= nDevices;
609 AudioDeviceID deviceList[ nDevices ];
610 property.mSelector = kAudioHardwarePropertyDevices;
611 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
612 if ( result != noErr ) {
613 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
614 error( RtAudioError::WARNING );
618 for ( unsigned int i=0; i<nDevices; i++ )
619 if ( id == deviceList[i] ) return i;
621 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
622 error( RtAudioError::WARNING );
626 unsigned int RtApiCore :: getDefaultOutputDevice( void )
628 unsigned int nDevices = getDeviceCount();
629 if ( nDevices <= 1 ) return 0;
632 UInt32 dataSize = sizeof( AudioDeviceID );
633 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
634 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
635 if ( result != noErr ) {
636 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
637 error( RtAudioError::WARNING );
641 dataSize = sizeof( AudioDeviceID ) * nDevices;
642 AudioDeviceID deviceList[ nDevices ];
643 property.mSelector = kAudioHardwarePropertyDevices;
644 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
645 if ( result != noErr ) {
646 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
647 error( RtAudioError::WARNING );
651 for ( unsigned int i=0; i<nDevices; i++ )
652 if ( id == deviceList[i] ) return i;
654 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
655 error( RtAudioError::WARNING );
659 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
661 RtAudio::DeviceInfo info;
665 unsigned int nDevices = getDeviceCount();
666 if ( nDevices == 0 ) {
667 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
668 error( RtAudioError::INVALID_USE );
672 if ( device >= nDevices ) {
673 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
674 error( RtAudioError::INVALID_USE );
678 AudioDeviceID deviceList[ nDevices ];
679 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
680 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
681 kAudioObjectPropertyScopeGlobal,
682 kAudioObjectPropertyElementMaster };
683 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
684 0, NULL, &dataSize, (void *) &deviceList );
685 if ( result != noErr ) {
686 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
687 error( RtAudioError::WARNING );
691 AudioDeviceID id = deviceList[ device ];
693 // Get the device name.
696 dataSize = sizeof( CFStringRef );
697 property.mSelector = kAudioObjectPropertyManufacturer;
698 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
699 if ( result != noErr ) {
700 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
701 errorText_ = errorStream_.str();
702 error( RtAudioError::WARNING );
706 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
707 int length = CFStringGetLength(cfname);
708 char *mname = (char *)malloc(length * 3 + 1);
709 #if defined( UNICODE ) || defined( _UNICODE )
710 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
712 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
714 info.name.append( (const char *)mname, strlen(mname) );
715 info.name.append( ": " );
719 property.mSelector = kAudioObjectPropertyName;
720 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
721 if ( result != noErr ) {
722 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
723 errorText_ = errorStream_.str();
724 error( RtAudioError::WARNING );
728 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
729 length = CFStringGetLength(cfname);
730 char *name = (char *)malloc(length * 3 + 1);
731 #if defined( UNICODE ) || defined( _UNICODE )
732 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
734 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
736 info.name.append( (const char *)name, strlen(name) );
740 // Get the output stream "configuration".
741 AudioBufferList *bufferList = nil;
742 property.mSelector = kAudioDevicePropertyStreamConfiguration;
743 property.mScope = kAudioDevicePropertyScopeOutput;
744 // property.mElement = kAudioObjectPropertyElementWildcard;
746 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
747 if ( result != noErr || dataSize == 0 ) {
748 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
749 errorText_ = errorStream_.str();
750 error( RtAudioError::WARNING );
754 // Allocate the AudioBufferList.
755 bufferList = (AudioBufferList *) malloc( dataSize );
756 if ( bufferList == NULL ) {
757 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
758 error( RtAudioError::WARNING );
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
763 if ( result != noErr || dataSize == 0 ) {
765 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
766 errorText_ = errorStream_.str();
767 error( RtAudioError::WARNING );
771 // Get output channel information.
772 unsigned int i, nStreams = bufferList->mNumberBuffers;
773 for ( i=0; i<nStreams; i++ )
774 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
777 // Get the input stream "configuration".
778 property.mScope = kAudioDevicePropertyScopeInput;
779 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
780 if ( result != noErr || dataSize == 0 ) {
781 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
782 errorText_ = errorStream_.str();
783 error( RtAudioError::WARNING );
787 // Allocate the AudioBufferList.
788 bufferList = (AudioBufferList *) malloc( dataSize );
789 if ( bufferList == NULL ) {
790 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
791 error( RtAudioError::WARNING );
795 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
796 if (result != noErr || dataSize == 0) {
798 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
799 errorText_ = errorStream_.str();
800 error( RtAudioError::WARNING );
804 // Get input channel information.
805 nStreams = bufferList->mNumberBuffers;
806 for ( i=0; i<nStreams; i++ )
807 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
810 // If device opens for both playback and capture, we determine the channels.
811 if ( info.outputChannels > 0 && info.inputChannels > 0 )
812 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
814 // Probe the device sample rates.
815 bool isInput = false;
816 if ( info.outputChannels == 0 ) isInput = true;
818 // Determine the supported sample rates.
819 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
820 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
821 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
822 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
823 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
824 errorText_ = errorStream_.str();
825 error( RtAudioError::WARNING );
829 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
830 AudioValueRange rangeList[ nRanges ];
831 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
832 if ( result != kAudioHardwareNoError ) {
833 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
834 errorText_ = errorStream_.str();
835 error( RtAudioError::WARNING );
839 // The sample rate reporting mechanism is a bit of a mystery. It
840 // seems that it can either return individual rates or a range of
841 // rates. I assume that if the min / max range values are the same,
842 // then that represents a single supported rate and if the min / max
843 // range values are different, the device supports an arbitrary
844 // range of values (though there might be multiple ranges, so we'll
845 // use the most conservative range).
846 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
847 bool haveValueRange = false;
848 info.sampleRates.clear();
849 for ( UInt32 i=0; i<nRanges; i++ ) {
850 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
851 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
852 info.sampleRates.push_back( tmpSr );
854 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
855 info.preferredSampleRate = tmpSr;
858 haveValueRange = true;
859 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
860 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
864 if ( haveValueRange ) {
865 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
866 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
867 info.sampleRates.push_back( SAMPLE_RATES[k] );
869 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
870 info.preferredSampleRate = SAMPLE_RATES[k];
875 // Sort and remove any redundant values
876 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
877 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
879 if ( info.sampleRates.size() == 0 ) {
880 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
881 errorText_ = errorStream_.str();
882 error( RtAudioError::WARNING );
886 // CoreAudio always uses 32-bit floating point data for PCM streams.
887 // Thus, any other "physical" formats supported by the device are of
888 // no interest to the client.
889 info.nativeFormats = RTAUDIO_FLOAT32;
891 if ( info.outputChannels > 0 )
892 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
893 if ( info.inputChannels > 0 )
894 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
900 static OSStatus callbackHandler( AudioDeviceID inDevice,
901 const AudioTimeStamp* /*inNow*/,
902 const AudioBufferList* inInputData,
903 const AudioTimeStamp* /*inInputTime*/,
904 AudioBufferList* outOutputData,
905 const AudioTimeStamp* /*inOutputTime*/,
908 CallbackInfo *info = (CallbackInfo *) infoPointer;
910 RtApiCore *object = (RtApiCore *) info->object;
911 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
912 return kAudioHardwareUnspecifiedError;
914 return kAudioHardwareNoError;
917 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
919 const AudioObjectPropertyAddress properties[],
920 void* handlePointer )
922 CoreHandle *handle = (CoreHandle *) handlePointer;
923 for ( UInt32 i=0; i<nAddresses; i++ ) {
924 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
925 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
926 handle->xrun[1] = true;
928 handle->xrun[0] = true;
932 return kAudioHardwareNoError;
935 static OSStatus rateListener( AudioObjectID inDevice,
936 UInt32 /*nAddresses*/,
937 const AudioObjectPropertyAddress /*properties*/[],
940 Float64 *rate = (Float64 *) ratePointer;
941 UInt32 dataSize = sizeof( Float64 );
942 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
943 kAudioObjectPropertyScopeGlobal,
944 kAudioObjectPropertyElementMaster };
945 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
946 return kAudioHardwareNoError;
// Open one direction (OUTPUT or INPUT) of a stream on CoreAudio device
// index 'device' with the requested channels/firstChannel/sampleRate/
// format.  *bufferSize is in frames and is an in/out parameter: it may
// be clamped to the device's supported range.  'options' may request
// RTAUDIO_MINIMIZE_LATENCY, RTAUDIO_HOG_DEVICE or RTAUDIO_NONINTERLEAVED.
// On any system error an explanatory message is placed in errorText_
// (via errorStream_) before taking the failure path.
949 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
950 unsigned int firstChannel, unsigned int sampleRate,
951 RtAudioFormat format, unsigned int *bufferSize,
952 RtAudio::StreamOptions *options )
955 unsigned int nDevices = getDeviceCount();
956 if ( nDevices == 0 ) {
957 // This should not happen because a check is made before this function is called.
958 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
962 if ( device >= nDevices ) {
963 // This should not happen because a check is made before this function is called.
964 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Translate the RtAudio device index into a CoreAudio AudioDeviceID by
// re-querying the system device list.
// NOTE(review): a variable-length array is a compiler extension in C++
// (accepted by gcc/clang; this backend is Apple-only so that is OK here).
968 AudioDeviceID deviceList[ nDevices ];
969 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
970 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
971 kAudioObjectPropertyScopeGlobal,
972 kAudioObjectPropertyElementMaster };
973 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
974 0, NULL, &dataSize, (void *) &deviceList );
975 if ( result != noErr ) {
976 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
980 AudioDeviceID id = deviceList[ device ];
982 // Setup for stream mode.
983 bool isInput = false;
984 if ( mode == INPUT ) {
986 property.mScope = kAudioDevicePropertyScopeInput;
989 property.mScope = kAudioDevicePropertyScopeOutput;
991 // Get the stream "configuration".
992 AudioBufferList *bufferList = nil;
994 property.mSelector = kAudioDevicePropertyStreamConfiguration;
995 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
996 if ( result != noErr || dataSize == 0 ) {
997 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
998 errorText_ = errorStream_.str();
1002 // Allocate the AudioBufferList.
1003 bufferList = (AudioBufferList *) malloc( dataSize );
1004 if ( bufferList == NULL ) {
1005 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1009 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1010 if (result != noErr || dataSize == 0) {
1012 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1013 errorText_ = errorStream_.str();
1017 // Search for one or more streams that contain the desired number of
1018 // channels. CoreAudio devices can have an arbitrary number of
1019 // streams and each stream can have an arbitrary number of channels.
1020 // For each stream, a single buffer of interleaved samples is
1021 // provided. RtAudio prefers the use of one stream of interleaved
1022 // data or multiple consecutive single-channel streams. However, we
1023 // now support multiple consecutive multi-channel streams of
1024 // interleaved data as well.
1025 UInt32 iStream, offsetCounter = firstChannel;
1026 UInt32 nStreams = bufferList->mNumberBuffers;
1027 bool monoMode = false;
1028 bool foundStream = false;
1030 // First check that the device supports the requested number of
1032 UInt32 deviceChannels = 0;
1033 for ( iStream=0; iStream<nStreams; iStream++ )
1034 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1036 if ( deviceChannels < ( channels + firstChannel ) ) {
1038 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1039 errorText_ = errorStream_.str();
// Single-stream case: find the first stream wide enough to hold
// channels + the remaining channel offset; 'offsetCounter' walks the
// requested firstChannel offset across earlier, narrower streams.
1043 // Look for a single stream meeting our needs.
1044 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1045 for ( iStream=0; iStream<nStreams; iStream++ ) {
1046 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1047 if ( streamChannels >= channels + offsetCounter ) {
1048 firstStream = iStream;
1049 channelOffset = offsetCounter;
1053 if ( streamChannels > offsetCounter ) break;
1054 offsetCounter -= streamChannels;
1057 // If we didn't find a single stream above, then we should be able
1058 // to meet the channel specification with multiple streams.
1059 if ( foundStream == false ) {
1061 offsetCounter = firstChannel;
1062 for ( iStream=0; iStream<nStreams; iStream++ ) {
1063 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1064 if ( streamChannels > offsetCounter ) break;
1065 offsetCounter -= streamChannels;
// Count how many consecutive streams are needed to cover 'channels';
// monoMode stays true only if every stream involved is single-channel.
1068 firstStream = iStream;
1069 channelOffset = offsetCounter;
1070 Int32 channelCounter = channels + offsetCounter - streamChannels;
1072 if ( streamChannels > 1 ) monoMode = false;
1073 while ( channelCounter > 0 ) {
1074 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1075 if ( streamChannels > 1 ) monoMode = false;
1076 channelCounter -= streamChannels;
1083 // Determine the buffer size.
1084 AudioValueRange bufferRange;
1085 dataSize = sizeof( AudioValueRange );
1086 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1087 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1089 if ( result != noErr ) {
1090 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1091 errorText_ = errorStream_.str();
// Clamp the requested buffer size into the device's supported range;
// RTAUDIO_MINIMIZE_LATENCY forces the device minimum.
1095 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1096 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1097 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1099 // Set the buffer size. For multiple streams, I'm assuming we only
1100 // need to make this setting for the master channel.
1101 UInt32 theSize = (UInt32) *bufferSize;
1102 dataSize = sizeof( UInt32 );
1103 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1104 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1106 if ( result != noErr ) {
1107 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1108 errorText_ = errorStream_.str();
1112 // If attempting to setup a duplex stream, the bufferSize parameter
1113 // MUST be the same in both directions!
1114 *bufferSize = theSize;
1115 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1116 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1117 errorText_ = errorStream_.str();
1121 stream_.bufferSize = *bufferSize;
1122 stream_.nBuffers = 1;
1124 // Try to set "hog" mode ... it's not clear to me this is working.
1125 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1127 dataSize = sizeof( hog_pid );
1128 property.mSelector = kAudioDevicePropertyHogMode;
1129 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1130 if ( result != noErr ) {
1131 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1132 errorText_ = errorStream_.str();
// Only claim the device if some other process (or nobody) owns it.
1136 if ( hog_pid != getpid() ) {
1138 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1141 errorText_ = errorStream_.str();
1147 // Check and if necessary, change the sample rate for the device.
1148 Float64 nominalRate;
1149 dataSize = sizeof( Float64 );
1150 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1151 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1152 if ( result != noErr ) {
1153 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1154 errorText_ = errorStream_.str();
1158 // Only change the sample rate if off by more than 1 Hz.
1159 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
// The rate change is asynchronous: install rateListener so the system
// can report the new nominal rate back through 'reportedRate'.
1161 // Set a property listener for the sample rate change
1162 Float64 reportedRate = 0.0;
1163 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1164 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1165 if ( result != noErr ) {
1166 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1167 errorText_ = errorStream_.str();
1171 nominalRate = (Float64) sampleRate;
1172 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1173 if ( result != noErr ) {
1174 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1175 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1176 errorText_ = errorStream_.str();
// Poll (in 5000-microsecond steps) until the listener stores the new
// rate into reportedRate, giving up after 5 seconds.
1180 // Now wait until the reported nominal rate is what we just set.
1181 UInt32 microCounter = 0;
1182 while ( reportedRate != nominalRate ) {
1183 microCounter += 5000;
1184 if ( microCounter > 5000000 ) break;
1188 // Remove the property listener.
1189 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1191 if ( microCounter > 5000000 ) {
1192 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1193 errorText_ = errorStream_.str();
1198 // Now set the stream format for all streams. Also, check the
1199 // physical format of the device and change that if necessary.
1200 AudioStreamBasicDescription description;
1201 dataSize = sizeof( AudioStreamBasicDescription );
1202 property.mSelector = kAudioStreamPropertyVirtualFormat;
1203 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1204 if ( result != noErr ) {
1205 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1206 errorText_ = errorStream_.str();
1210 // Set the sample rate and data format id. However, only make the
1211 // change if the sample rate is not within 1.0 of the desired
1212 // rate and the format is not linear pcm.
1213 bool updateFormat = false;
1214 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1215 description.mSampleRate = (Float64) sampleRate;
1216 updateFormat = true;
1219 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1220 description.mFormatID = kAudioFormatLinearPCM;
1221 updateFormat = true;
1224 if ( updateFormat ) {
1225 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1226 if ( result != noErr ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1233 // Now check the physical format.
1234 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1235 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1236 if ( result != noErr ) {
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1238 errorText_ = errorStream_.str();
1242 //std::cout << "Current physical stream format:" << std::endl;
1243 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1244 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1245 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1246 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1248 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1249 description.mFormatID = kAudioFormatLinearPCM;
1250 //description.mSampleRate = (Float64) sampleRate;
1251 AudioStreamBasicDescription testDescription = description;
// Candidate physical formats, tried in order of decreasing quality.
// 'first' encodes bit depth (24.2/24.4 distinguish the two 4-byte
// 24-bit layouts); 'second' carries the matching format flags.
1254 // We'll try higher bit rates first and then work our way down.
1255 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1256 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1258 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1260 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1261 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1262 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1263 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1264 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1265 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1266 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1267 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1269 bool setPhysicalFormat = false;
1270 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1271 testDescription = description;
1272 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1273 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' below is bitwise NOT, so this condition is nonzero
// (true) for EVERY 24-bit candidate, including the packed one -- '!'
// was almost certainly intended.  Net effect: packed 24-bit gets
// mBytesPerFrame = 4 * channels instead of 3 * channels.  Confirm
// against upstream RtAudio before changing.
1274 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1275 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1277 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1278 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1279 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1280 if ( result == noErr ) {
1281 setPhysicalFormat = true;
1282 //std::cout << "Updated physical stream format:" << std::endl;
1283 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1284 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1285 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1286 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1291 if ( !setPhysicalFormat ) {
1292 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1293 errorText_ = errorStream_.str();
1296 } // done setting virtual/physical formats.
1298 // Get the stream / device latency.
1300 dataSize = sizeof( UInt32 );
1301 property.mSelector = kAudioDevicePropertyLatency;
1302 if ( AudioObjectHasProperty( id, &property ) == true ) {
1303 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1304 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1306 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1307 errorText_ = errorStream_.str();
1308 error( RtAudioError::WARNING );
1312 // Byte-swapping: According to AudioHardware.h, the stream data will
1313 // always be presented in native-endian format, so we should never
1314 // need to byte swap.
1315 stream_.doByteSwap[mode] = false;
1317 // From the CoreAudio documentation, PCM data must be supplied as
1319 stream_.userFormat = format;
1320 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
// Record channel bookkeeping: with a single stream the device-side
// channel count is the stream width; with multiple streams it equals
// the requested channel count.
1322 if ( streamCount == 1 )
1323 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1324 else // multiple streams
1325 stream_.nDeviceChannels[mode] = channels;
1326 stream_.nUserChannels[mode] = channels;
1327 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1328 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1329 else stream_.userInterleaved = true;
1330 stream_.deviceInterleaved[mode] = true;
1331 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1333 // Set flags for buffer conversion.
1334 stream_.doConvertBuffer[mode] = false;
1335 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1336 stream_.doConvertBuffer[mode] = true;
1337 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1338 stream_.doConvertBuffer[mode] = true;
1339 if ( streamCount == 1 ) {
1340 if ( stream_.nUserChannels[mode] > 1 &&
1341 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1342 stream_.doConvertBuffer[mode] = true;
1344 else if ( monoMode && stream_.userInterleaved )
1345 stream_.doConvertBuffer[mode] = true;
1347 // Allocate our CoreHandle structure for the stream.
1348 CoreHandle *handle = 0;
1349 if ( stream_.apiHandle == 0 ) {
1351 handle = new CoreHandle;
1353 catch ( std::bad_alloc& ) {
1354 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1358 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1359 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1362 stream_.apiHandle = (void *) handle;
// Second (duplex) open reuses the handle created by the first open.
1365 handle = (CoreHandle *) stream_.apiHandle;
1366 handle->iStream[mode] = firstStream;
1367 handle->nStreams[mode] = streamCount;
1368 handle->id[mode] = id;
1370 // Allocate necessary internal buffers.
1371 unsigned long bufferBytes;
1372 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1373 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): the memset below runs BEFORE the NULL check, so a
// failed malloc dereferences NULL here; the check should come first
// (or the commented-out calloc above should be restored).
1374 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1375 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1376 if ( stream_.userBuffer[mode] == NULL ) {
1377 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1381 // If possible, we will make use of the CoreAudio stream buffers as
1382 // "device buffers". However, we can't do this if using multiple
1384 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
// An existing (output-side) device buffer can be reused for input if
// it is already large enough per frame.
1386 bool makeBuffer = true;
1387 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1388 if ( mode == INPUT ) {
1389 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1390 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1391 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1396 bufferBytes *= *bufferSize;
1397 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1398 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1399 if ( stream_.deviceBuffer == NULL ) {
1400 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1406 stream_.sampleRate = sampleRate;
1407 stream_.device[mode] = device;
1408 stream_.state = STREAM_STOPPED;
1409 stream_.callbackInfo.object = (void *) this;
1411 // Setup the buffer conversion information structure.
1412 if ( stream_.doConvertBuffer[mode] ) {
1413 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1414 else setConvertInfo( mode, channelOffset );
1417 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1418 // Only one callback procedure per device.
1419 stream_.mode = DUPLEX;
1421 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1422 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1424 // deprecated in favor of AudioDeviceCreateIOProcID()
1425 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1427 if ( result != noErr ) {
1428 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1429 errorText_ = errorStream_.str();
1432 if ( stream_.mode == OUTPUT && mode == INPUT )
1433 stream_.mode = DUPLEX;
1435 stream_.mode = mode;
1438 // Setup the device property listener for over/underload.
1439 property.mSelector = kAudioDeviceProcessorOverload;
1440 property.mScope = kAudioObjectPropertyScopeGlobal;
1441 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Failure cleanup (reached via the error paths above): release the
// condition variable and API handle, free any allocated buffers, and
// mark the stream closed.
1447 pthread_cond_destroy( &handle->condition );
1449 stream_.apiHandle = 0;
1452 for ( int i=0; i<2; i++ ) {
1453 if ( stream_.userBuffer[i] ) {
1454 free( stream_.userBuffer[i] );
1455 stream_.userBuffer[i] = 0;
1459 if ( stream_.deviceBuffer ) {
1460 free( stream_.deviceBuffer );
1461 stream_.deviceBuffer = 0;
1464 stream_.state = STREAM_CLOSED;
// Close the open stream: for each direction in use, remove its xrun
// (processor-overload) property listener, stop the device if still
// running, destroy/remove its IOProc, then free the user and device
// buffers, destroy the CoreHandle condition variable, and reset the
// stream_ bookkeeping to UNINITIALIZED/STREAM_CLOSED.
1468 void RtApiCore :: closeStream( void )
1470 if ( stream_.state == STREAM_CLOSED ) {
1471 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1472 error( RtAudioError::WARNING );
// NOTE(review): 'handle' is dereferenced below without a NULL check;
// presumably apiHandle is always non-zero once state != STREAM_CLOSED,
// but confirm this holds for a partially failed open.
1476 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output side (also covers the output half of a duplex stream).
1477 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1479 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1480 kAudioObjectPropertyScopeGlobal,
1481 kAudioObjectPropertyElementMaster };
1483 property.mSelector = kAudioDeviceProcessorOverload;
1484 property.mScope = kAudioObjectPropertyScopeGlobal;
1485 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1486 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1487 error( RtAudioError::WARNING );
1490 if ( stream_.state == STREAM_RUNNING )
1491 AudioDeviceStop( handle->id[0], callbackHandler );
1492 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1493 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1495 // deprecated in favor of AudioDeviceDestroyIOProcID()
1496 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input side: only torn down separately for a pure input stream, or a
// duplex stream whose input uses a different device than the output.
1500 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1502 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1503 kAudioObjectPropertyScopeGlobal,
1504 kAudioObjectPropertyElementMaster };
1506 property.mSelector = kAudioDeviceProcessorOverload;
1507 property.mScope = kAudioObjectPropertyScopeGlobal;
1508 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1509 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1510 error( RtAudioError::WARNING );
1513 if ( stream_.state == STREAM_RUNNING )
1514 AudioDeviceStop( handle->id[1], callbackHandler );
1515 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1516 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1518 // deprecated in favor of AudioDeviceDestroyIOProcID()
1519 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release the per-direction user buffers and the shared device buffer.
1523 for ( int i=0; i<2; i++ ) {
1524 if ( stream_.userBuffer[i] ) {
1525 free( stream_.userBuffer[i] );
1526 stream_.userBuffer[i] = 0;
1530 if ( stream_.deviceBuffer ) {
1531 free( stream_.deviceBuffer );
1532 stream_.deviceBuffer = 0;
1535 // Destroy pthread condition variable.
1536 pthread_cond_destroy( &handle->condition );
1538 stream_.apiHandle = 0;
1540 stream_.mode = UNINITIALIZED;
1541 stream_.state = STREAM_CLOSED;
// Start the stream: run the base-class checks, then call
// AudioDeviceStart() on the output device (handle->id[0]) and -- when
// input is served by a separate device -- on the input device
// (handle->id[1]).  On success the state becomes STREAM_RUNNING; any
// CoreAudio failure is reported as a SYSTEM_ERROR.
1544 void RtApiCore :: startStream( void )
1547 RtApi::startStream();
1548 if ( stream_.state == STREAM_RUNNING ) {
1549 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1550 error( RtAudioError::WARNING );
// Record the start timestamp used for stream-time bookkeeping.
1554 #if defined( HAVE_GETTIMEOFDAY )
1555 gettimeofday( &stream_.lastTickTimestamp, NULL );
1558 OSStatus result = noErr;
1559 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1560 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1562 result = AudioDeviceStart( handle->id[0], callbackHandler );
1563 if ( result != noErr ) {
1564 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1565 errorText_ = errorStream_.str();
// A duplex stream on a single device needs only one AudioDeviceStart.
1570 if ( stream_.mode == INPUT ||
1571 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1573 result = AudioDeviceStart( handle->id[1], callbackHandler );
1574 if ( result != noErr ) {
1575 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1576 errorText_ = errorStream_.str();
// Reset drain bookkeeping so the callback starts in the normal
// (non-draining) state.
1581 handle->drainCounter = 0;
1582 handle->internalDrain = false;
1583 stream_.state = STREAM_RUNNING;
1586 if ( result == noErr ) return;
1587 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, first letting any pending output drain: setting
// drainCounter to 2 tells the audio callback to begin shutdown, and
// this thread blocks on the condition variable until the callback
// signals that draining is finished.  Then AudioDeviceStop() is called
// for each device in use.
1590 void RtApiCore :: stopStream( void )
1593 if ( stream_.state == STREAM_STOPPED ) {
1594 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1595 error( RtAudioError::WARNING );
1599 OSStatus result = noErr;
1600 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1601 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Only initiate a drain if the callback isn't already draining.
// NOTE(review): POSIX requires stream_.mutex to be locked by this
// thread before pthread_cond_wait(); no lock is visible here --
// confirm the caller/locking convention.
1603 if ( handle->drainCounter == 0 ) {
1604 handle->drainCounter = 2;
1605 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1608 result = AudioDeviceStop( handle->id[0], callbackHandler );
1609 if ( result != noErr ) {
1610 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1611 errorText_ = errorStream_.str();
// Stop the input device only when it differs from the output device.
1616 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1618 result = AudioDeviceStop( handle->id[1], callbackHandler );
1619 if ( result != noErr ) {
1620 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1621 errorText_ = errorStream_.str();
1626 stream_.state = STREAM_STOPPED;
1629 if ( result == noErr ) return;
1630 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately without draining pending output:
// drainCounter == 2 makes the audio callback write zeros to the output
// stream (see callbackEvent) and proceed directly to shutdown.
1633 void RtApiCore :: abortStream( void )
1636 if ( stream_.state == STREAM_STOPPED ) {
1637 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1638 error( RtAudioError::WARNING );
1642 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1643 handle->drainCounter = 2;
1648 // This function will be called by a spawned thread when the user
1649 // callback function signals that the stream should be stopped or
1650 // aborted. It is better to handle it this way because the
1651 // callbackEvent() function probably should return before the AudioDeviceStop()
1652 // function is called.
// Helper-thread entry point (see comment above): runs stopStream() off
// the audio callback thread so the callback can return before
// AudioDeviceStop() executes.
1653 static void *coreStopStream( void *ptr )
// Recover the RtApiCore instance from the CallbackInfo payload.
1655 CallbackInfo *info = (CallbackInfo *) ptr;
1656 RtApiCore *object = (RtApiCore *) info->object;
1658 object->stopStream();
// Terminate this helper thread once the stream has been stopped.
1659 pthread_exit( NULL );
1662 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1663 const AudioBufferList *inBufferList,
1664 const AudioBufferList *outBufferList )
1666 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1667 if ( stream_.state == STREAM_CLOSED ) {
1668 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1669 error( RtAudioError::WARNING );
1673 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1674 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1676 // Check if we were draining the stream and signal is finished.
1677 if ( handle->drainCounter > 3 ) {
1678 ThreadHandle threadId;
1680 stream_.state = STREAM_STOPPING;
1681 if ( handle->internalDrain == true )
1682 pthread_create( &threadId, NULL, coreStopStream, info );
1683 else // external call to stopStream()
1684 pthread_cond_signal( &handle->condition );
1688 AudioDeviceID outputDevice = handle->id[0];
1690 // Invoke user callback to get fresh output data UNLESS we are
1691 // draining stream or duplex mode AND the input/output devices are
1692 // different AND this function is called for the input device.
1693 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1694 RtAudioCallback callback = (RtAudioCallback) info->callback;
1695 double streamTime = getStreamTime();
1696 RtAudioStreamStatus status = 0;
1697 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1698 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1699 handle->xrun[0] = false;
1701 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1702 status |= RTAUDIO_INPUT_OVERFLOW;
1703 handle->xrun[1] = false;
1706 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1707 stream_.bufferSize, streamTime, status, info->userData );
1708 if ( cbReturnValue == 2 ) {
1709 stream_.state = STREAM_STOPPING;
1710 handle->drainCounter = 2;
1714 else if ( cbReturnValue == 1 ) {
1715 handle->drainCounter = 1;
1716 handle->internalDrain = true;
1720 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1722 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1724 if ( handle->nStreams[0] == 1 ) {
1725 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1727 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1729 else { // fill multiple streams with zeros
1730 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1731 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1733 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1737 else if ( handle->nStreams[0] == 1 ) {
1738 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1739 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1740 stream_.userBuffer[0], stream_.convertInfo[0] );
1742 else { // copy from user buffer
1743 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1744 stream_.userBuffer[0],
1745 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1748 else { // fill multiple streams
1749 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1750 if ( stream_.doConvertBuffer[0] ) {
1751 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1752 inBuffer = (Float32 *) stream_.deviceBuffer;
1755 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1756 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1757 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1758 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1759 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1762 else { // fill multiple multi-channel streams with interleaved data
1763 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1766 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1767 UInt32 inChannels = stream_.nUserChannels[0];
1768 if ( stream_.doConvertBuffer[0] ) {
1769 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1770 inChannels = stream_.nDeviceChannels[0];
1773 if ( inInterleaved ) inOffset = 1;
1774 else inOffset = stream_.bufferSize;
1776 channelsLeft = inChannels;
1777 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1779 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1780 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1783 // Account for possible channel offset in first stream
1784 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1785 streamChannels -= stream_.channelOffset[0];
1786 outJump = stream_.channelOffset[0];
1790 // Account for possible unfilled channels at end of the last stream
1791 if ( streamChannels > channelsLeft ) {
1792 outJump = streamChannels - channelsLeft;
1793 streamChannels = channelsLeft;
1796 // Determine input buffer offsets and skips
1797 if ( inInterleaved ) {
1798 inJump = inChannels;
1799 in += inChannels - channelsLeft;
1803 in += (inChannels - channelsLeft) * inOffset;
1806 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1807 for ( unsigned int j=0; j<streamChannels; j++ ) {
1808 *out++ = in[j*inOffset];
1813 channelsLeft -= streamChannels;
1819 // Don't bother draining input
1820 if ( handle->drainCounter ) {
1821 handle->drainCounter++;
1825 AudioDeviceID inputDevice;
1826 inputDevice = handle->id[1];
1827 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1829 if ( handle->nStreams[1] == 1 ) {
1830 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1831 convertBuffer( stream_.userBuffer[1],
1832 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1833 stream_.convertInfo[1] );
1835 else { // copy to user buffer
1836 memcpy( stream_.userBuffer[1],
1837 inBufferList->mBuffers[handle->iStream[1]].mData,
1838 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1841 else { // read from multiple streams
1842 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1843 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1845 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1846 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1847 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1848 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1849 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1852 else { // read from multiple multi-channel streams
1853 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1856 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1857 UInt32 outChannels = stream_.nUserChannels[1];
1858 if ( stream_.doConvertBuffer[1] ) {
1859 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1860 outChannels = stream_.nDeviceChannels[1];
1863 if ( outInterleaved ) outOffset = 1;
1864 else outOffset = stream_.bufferSize;
1866 channelsLeft = outChannels;
1867 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1869 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1870 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1873 // Account for possible channel offset in first stream
1874 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1875 streamChannels -= stream_.channelOffset[1];
1876 inJump = stream_.channelOffset[1];
1880 // Account for possible unread channels at end of the last stream
1881 if ( streamChannels > channelsLeft ) {
1882 inJump = streamChannels - channelsLeft;
1883 streamChannels = channelsLeft;
1886 // Determine output buffer offsets and skips
1887 if ( outInterleaved ) {
1888 outJump = outChannels;
1889 out += outChannels - channelsLeft;
1893 out += (outChannels - channelsLeft) * outOffset;
1896 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1897 for ( unsigned int j=0; j<streamChannels; j++ ) {
1898 out[j*outOffset] = *in++;
1903 channelsLeft -= streamChannels;
1907 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1908 convertBuffer( stream_.userBuffer[1],
1909 stream_.deviceBuffer,
1910 stream_.convertInfo[1] );
1916 //MUTEX_UNLOCK( &stream_.mutex );
1918 RtApi::tickStreamTime();
1922 const char* RtApiCore :: getErrorCode( OSStatus code )
1926 case kAudioHardwareNotRunningError:
1927 return "kAudioHardwareNotRunningError";
1929 case kAudioHardwareUnspecifiedError:
1930 return "kAudioHardwareUnspecifiedError";
1932 case kAudioHardwareUnknownPropertyError:
1933 return "kAudioHardwareUnknownPropertyError";
1935 case kAudioHardwareBadPropertySizeError:
1936 return "kAudioHardwareBadPropertySizeError";
1938 case kAudioHardwareIllegalOperationError:
1939 return "kAudioHardwareIllegalOperationError";
1941 case kAudioHardwareBadObjectError:
1942 return "kAudioHardwareBadObjectError";
1944 case kAudioHardwareBadDeviceError:
1945 return "kAudioHardwareBadDeviceError";
1947 case kAudioHardwareBadStreamError:
1948 return "kAudioHardwareBadStreamError";
1950 case kAudioHardwareUnsupportedOperationError:
1951 return "kAudioHardwareUnsupportedOperationError";
1953 case kAudioDeviceUnsupportedFormatError:
1954 return "kAudioDeviceUnsupportedFormatError";
1956 case kAudioDevicePermissionsError:
1957 return "kAudioDevicePermissionsError";
1960 return "CoreAudio unknown error";
1964 //******************** End of __MACOSX_CORE__ *********************//
1967 #if defined(__UNIX_JACK__)
1969 // JACK is a low-latency audio server, originally written for the
1970 // GNU/Linux operating system and now also ported to OS-X. It can
1971 // connect a number of different applications to an audio device, as
1972 // well as allowing them to share audio between themselves.
1974 // When using JACK with RtAudio, "devices" refer to JACK clients that
1975 // have ports connected to the server. The JACK server is typically
1976 // started in a terminal as follows:
1978 // .jackd -d alsa -d hw:0
1980 // or through an interface program such as qjackctl. Many of the
1981 // parameters normally set for a stream are fixed by the JACK server
1982 // and can be specified when the JACK server is started. In
1985 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1987 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1988 // frames, and number of buffers = 4. Once the server is running, it
1989 // is not possible to override these values. If the values are not
1990 // specified in the command-line, the JACK server uses default values.
1992 // The JACK server does not have to be running when an instance of
1993 // RtApiJack is created, though the function getDeviceCount() will
1994 // report 0 devices found until JACK has been started. When no
1995 // devices are available (i.e., the JACK server is not running), a
1996 // stream cannot be opened.
1998 #include <jack/jack.h>
2002 // A structure to hold various information related to the Jack API
2005 jack_client_t *client;
2006 jack_port_t **ports[2];
2007 std::string deviceName[2];
2009 pthread_cond_t condition;
2010 int drainCounter; // Tracks callback counts when draining
2011 bool internalDrain; // Indicates if stop is initiated from callback or not.
2014 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op error sink handed to JACK so its internal error messages are
// suppressed unless RtAudio debugging is enabled.
static void jackSilentError( const char * ) {}
#endif
2021 RtApiJack :: RtApiJack()
2022 :shouldAutoconnect_(true) {
2023 // Nothing to do here.
2024 #if !defined(__RTAUDIO_DEBUG__)
2025 // Turn off Jack's internal error reporting.
2026 jack_set_error_function( &jackSilentError );
2030 RtApiJack :: ~RtApiJack()
2032 if ( stream_.state != STREAM_CLOSED ) closeStream();
2035 unsigned int RtApiJack :: getDeviceCount( void )
2037 // See if we can become a jack client.
2038 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2039 jack_status_t *status = NULL;
2040 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2041 if ( client == 0 ) return 0;
2044 std::string port, previousPort;
2045 unsigned int nChannels = 0, nDevices = 0;
2046 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2048 // Parse the port names up to the first colon (:).
2051 port = (char *) ports[ nChannels ];
2052 iColon = port.find(":");
2053 if ( iColon != std::string::npos ) {
2054 port = port.substr( 0, iColon + 1 );
2055 if ( port != previousPort ) {
2057 previousPort = port;
2060 } while ( ports[++nChannels] );
2064 jack_client_close( client );
2068 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2070 RtAudio::DeviceInfo info;
2071 info.probed = false;
2073 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2074 jack_status_t *status = NULL;
2075 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2076 if ( client == 0 ) {
2077 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2078 error( RtAudioError::WARNING );
2083 std::string port, previousPort;
2084 unsigned int nPorts = 0, nDevices = 0;
2085 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2087 // Parse the port names up to the first colon (:).
2090 port = (char *) ports[ nPorts ];
2091 iColon = port.find(":");
2092 if ( iColon != std::string::npos ) {
2093 port = port.substr( 0, iColon );
2094 if ( port != previousPort ) {
2095 if ( nDevices == device ) info.name = port;
2097 previousPort = port;
2100 } while ( ports[++nPorts] );
2104 if ( device >= nDevices ) {
2105 jack_client_close( client );
2106 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2107 error( RtAudioError::INVALID_USE );
2111 // Get the current jack server sample rate.
2112 info.sampleRates.clear();
2114 info.preferredSampleRate = jack_get_sample_rate( client );
2115 info.sampleRates.push_back( info.preferredSampleRate );
2117 // Count the available ports containing the client name as device
2118 // channels. Jack "input ports" equal RtAudio output channels.
2119 unsigned int nChannels = 0;
2120 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2122 while ( ports[ nChannels ] ) nChannels++;
2124 info.outputChannels = nChannels;
2127 // Jack "output ports" equal RtAudio input channels.
2129 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2131 while ( ports[ nChannels ] ) nChannels++;
2133 info.inputChannels = nChannels;
2136 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2137 jack_client_close(client);
2138 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2139 error( RtAudioError::WARNING );
2143 // If device opens for both playback and capture, we determine the channels.
2144 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2145 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2147 // Jack always uses 32-bit floats.
2148 info.nativeFormats = RTAUDIO_FLOAT32;
2150 // Jack doesn't provide default devices so we'll use the first available one.
2151 if ( device == 0 && info.outputChannels > 0 )
2152 info.isDefaultOutput = true;
2153 if ( device == 0 && info.inputChannels > 0 )
2154 info.isDefaultInput = true;
2156 jack_client_close(client);
2161 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2163 CallbackInfo *info = (CallbackInfo *) infoPointer;
2165 RtApiJack *object = (RtApiJack *) info->object;
2166 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2171 // This function will be called by a spawned thread when the Jack
2172 // server signals that it is shutting down. It is necessary to handle
2173 // it this way because the jackShutdown() function must return before
2174 // the jack_deactivate() function (in closeStream()) will return.
2175 static void *jackCloseStream( void *ptr )
2177 CallbackInfo *info = (CallbackInfo *) ptr;
2178 RtApiJack *object = (RtApiJack *) info->object;
2180 object->closeStream();
2182 pthread_exit( NULL );
2184 static void jackShutdown( void *infoPointer )
2186 CallbackInfo *info = (CallbackInfo *) infoPointer;
2187 RtApiJack *object = (RtApiJack *) info->object;
2189 // Check current stream state. If stopped, then we'll assume this
2190 // was called as a result of a call to RtApiJack::stopStream (the
2191 // deactivation of a client handle causes this function to be called).
2192 // If not, we'll assume the Jack server is shutting down or some
2193 // other problem occurred and we should close the stream.
2194 if ( object->isStreamRunning() == false ) return;
2196 ThreadHandle threadId;
2197 pthread_create( &threadId, NULL, jackCloseStream, info );
2198 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2201 static int jackXrun( void *infoPointer )
2203 JackHandle *handle = *((JackHandle **) infoPointer);
2205 if ( handle->ports[0] ) handle->xrun[0] = true;
2206 if ( handle->ports[1] ) handle->xrun[1] = true;
// Configure one direction (OUTPUT or INPUT) of a JACK stream: connect
// to the JACK server (first pass only), validate the requested device,
// channel count and sample rate against the server, allocate user /
// conversion buffers, register ports, and install the JACK callbacks.
// NOTE(review): this listing elides the error-cleanup label and some
// closing braces; failure paths below fall through to the teardown
// code at the end.
2211 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2212 unsigned int firstChannel, unsigned int sampleRate,
2213 RtAudioFormat format, unsigned int *bufferSize,
2214 RtAudio::StreamOptions *options )
2216 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2218 // Look for jack server and try to become a client (only do once per stream).
2219 jack_client_t *client = 0;
2220 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2221 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2222 jack_status_t *status = NULL;
// Honor a user-supplied client name when one was given in the options.
2223 if ( options && !options->streamName.empty() )
2224 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2226 client = jack_client_open( "RtApiJack", jackoptions, status );
2227 if ( client == 0 ) {
2228 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2229 error( RtAudioError::WARNING );
2234 // The handle must have been created on an earlier pass.
2235 client = handle->client;
// Enumerate "devices" (distinct port-name prefixes before the first
// colon) to translate the numeric device ID into a JACK client name.
2239 std::string port, previousPort, deviceName;
2240 unsigned int nPorts = 0, nDevices = 0;
2241 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2243 // Parse the port names up to the first colon (:).
2246 port = (char *) ports[ nPorts ];
2247 iColon = port.find(":");
2248 if ( iColon != std::string::npos ) {
2249 port = port.substr( 0, iColon );
2250 if ( port != previousPort ) {
2251 if ( nDevices == device ) deviceName = port;
2253 previousPort = port;
2256 } while ( ports[++nPorts] );
2260 if ( device >= nDevices ) {
2261 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Port direction is from the server's point of view: our playback
// connects to JACK *input* ports, our capture to JACK *output* ports.
2265 unsigned long flag = JackPortIsInput;
2266 if ( mode == INPUT ) flag = JackPortIsOutput;
2268 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2269 // Count the available ports containing the client name as device
2270 // channels. Jack "input ports" equal RtAudio output channels.
2271 unsigned int nChannels = 0;
2272 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2274 while ( ports[ nChannels ] ) nChannels++;
2277 // Compare the jack ports for specified client to the requested number of channels.
2278 if ( nChannels < (channels + firstChannel) ) {
2279 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2280 errorText_ = errorStream_.str();
2285 // Check the jack server sample rate.
2286 unsigned int jackRate = jack_get_sample_rate( client );
// The server rate is fixed; a mismatch is fatal for this open attempt.
2287 if ( sampleRate != jackRate ) {
2288 jack_client_close( client );
2289 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2290 errorText_ = errorStream_.str();
2293 stream_.sampleRate = jackRate;
2295 // Get the latency of the JACK port.
2296 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2297 if ( ports[ firstChannel ] ) {
2299 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2300 // the range (usually the min and max are equal)
2301 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2302 // get the latency range
2303 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2304 // be optimistic, use the min!
2305 stream_.latency[mode] = latrange.min;
2306 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2310 // The jack server always uses 32-bit floating-point data.
2311 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2312 stream_.userFormat = format;
2314 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2315 else stream_.userInterleaved = true;
2317 // Jack always uses non-interleaved buffers.
2318 stream_.deviceInterleaved[mode] = false;
2320 // Jack always provides host byte-ordered data.
2321 stream_.doByteSwap[mode] = false;
2323 // Get the buffer size. The buffer size and number of buffers
2324 // (periods) is set when the jack server is started.
2325 stream_.bufferSize = (int) jack_get_buffer_size( client );
2326 *bufferSize = stream_.bufferSize;
2328 stream_.nDeviceChannels[mode] = channels;
2329 stream_.nUserChannels[mode] = channels;
2331 // Set flags for buffer conversion.
2332 stream_.doConvertBuffer[mode] = false;
2333 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2334 stream_.doConvertBuffer[mode] = true;
2335 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2336 stream_.nUserChannels[mode] > 1 )
2337 stream_.doConvertBuffer[mode] = true;
2339 // Allocate our JackHandle structure for the stream.
// First pass allocates the handle; the second (duplex) pass reuses it.
2340 if ( handle == 0 ) {
2342 handle = new JackHandle;
2344 catch ( std::bad_alloc& ) {
2345 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2349 if ( pthread_cond_init(&handle->condition, NULL) ) {
2350 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2353 stream_.apiHandle = (void *) handle;
2354 handle->client = client;
2356 handle->deviceName[mode] = deviceName;
2358 // Allocate necessary internal buffers.
2359 unsigned long bufferBytes;
2360 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2361 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2362 if ( stream_.userBuffer[mode] == NULL ) {
2363 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2367 if ( stream_.doConvertBuffer[mode] ) {
// A shared device buffer may already exist from the output pass; only
// reallocate when this pass needs a larger one.
2369 bool makeBuffer = true;
2370 if ( mode == OUTPUT )
2371 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2372 else { // mode == INPUT
2373 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2374 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2375 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2376 if ( bufferBytes < bytesOut ) makeBuffer = false;
2381 bufferBytes *= *bufferSize;
2382 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2383 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2384 if ( stream_.deviceBuffer == NULL ) {
2385 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2391 // Allocate memory for the Jack ports (channels) identifiers.
2392 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2393 if ( handle->ports[mode] == NULL ) {
2394 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2398 stream_.device[mode] = device;
2399 stream_.channelOffset[mode] = firstChannel;
2400 stream_.state = STREAM_STOPPED;
2401 stream_.callbackInfo.object = (void *) this;
2403 if ( stream_.mode == OUTPUT && mode == INPUT )
2404 // We had already set up the stream for output.
2405 stream_.mode = DUPLEX;
2407 stream_.mode = mode;
// Install the process, xrun and shutdown callbacks on the client.
2408 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2409 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2410 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2413 // Register our ports.
2415 if ( mode == OUTPUT ) {
2416 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2417 snprintf( label, 64, "outport %d", i );
2418 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2419 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2423 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2424 snprintf( label, 64, "inport %d", i );
2425 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2426 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2430 // Setup the buffer conversion information structure. We don't use
2431 // buffers to do channel offsets, so we override that parameter
2433 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2435 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error cleanup (label elided in this listing): destroy the condition
// variable, close the client, and release all allocated memory.
2441 pthread_cond_destroy( &handle->condition );
2442 jack_client_close( handle->client );
2444 if ( handle->ports[0] ) free( handle->ports[0] );
2445 if ( handle->ports[1] ) free( handle->ports[1] );
2448 stream_.apiHandle = 0;
2451 for ( int i=0; i<2; i++ ) {
2452 if ( stream_.userBuffer[i] ) {
2453 free( stream_.userBuffer[i] );
2454 stream_.userBuffer[i] = 0;
2458 if ( stream_.deviceBuffer ) {
2459 free( stream_.deviceBuffer );
2460 stream_.deviceBuffer = 0;
2466 void RtApiJack :: closeStream( void )
2468 if ( stream_.state == STREAM_CLOSED ) {
2469 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2470 error( RtAudioError::WARNING );
2474 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2477 if ( stream_.state == STREAM_RUNNING )
2478 jack_deactivate( handle->client );
2480 jack_client_close( handle->client );
2484 if ( handle->ports[0] ) free( handle->ports[0] );
2485 if ( handle->ports[1] ) free( handle->ports[1] );
2486 pthread_cond_destroy( &handle->condition );
2488 stream_.apiHandle = 0;
2491 for ( int i=0; i<2; i++ ) {
2492 if ( stream_.userBuffer[i] ) {
2493 free( stream_.userBuffer[i] );
2494 stream_.userBuffer[i] = 0;
2498 if ( stream_.deviceBuffer ) {
2499 free( stream_.deviceBuffer );
2500 stream_.deviceBuffer = 0;
2503 stream_.mode = UNINITIALIZED;
2504 stream_.state = STREAM_CLOSED;
2507 void RtApiJack :: startStream( void )
2510 RtApi::startStream();
2511 if ( stream_.state == STREAM_RUNNING ) {
2512 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2513 error( RtAudioError::WARNING );
2517 #if defined( HAVE_GETTIMEOFDAY )
2518 gettimeofday( &stream_.lastTickTimestamp, NULL );
2521 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2522 int result = jack_activate( handle->client );
2524 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2530 // Get the list of available ports.
2531 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2533 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2534 if ( ports == NULL) {
2535 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2539 // Now make the port connections. Since RtAudio wasn't designed to
2540 // allow the user to select particular channels of a device, we'll
2541 // just open the first "nChannels" ports with offset.
2542 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2544 if ( ports[ stream_.channelOffset[0] + i ] )
2545 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2548 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2555 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2557 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2558 if ( ports == NULL) {
2559 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2563 // Now make the port connections. See note above.
2564 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2566 if ( ports[ stream_.channelOffset[1] + i ] )
2567 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2570 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2577 handle->drainCounter = 0;
2578 handle->internalDrain = false;
2579 stream_.state = STREAM_RUNNING;
2582 if ( result == 0 ) return;
2583 error( RtAudioError::SYSTEM_ERROR );
2586 void RtApiJack :: stopStream( void )
2589 if ( stream_.state == STREAM_STOPPED ) {
2590 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2591 error( RtAudioError::WARNING );
2595 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2596 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2598 if ( handle->drainCounter == 0 ) {
2599 handle->drainCounter = 2;
2600 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2604 jack_deactivate( handle->client );
2605 stream_.state = STREAM_STOPPED;
2608 void RtApiJack :: abortStream( void )
2611 if ( stream_.state == STREAM_STOPPED ) {
2612 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2613 error( RtAudioError::WARNING );
2617 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2618 handle->drainCounter = 2;
2623 // This function will be called by a spawned thread when the user
2624 // callback function signals that the stream should be stopped or
2625 // aborted. It is necessary to handle it this way because the
2626 // callbackEvent() function must return before the jack_deactivate()
2627 // function will return.
2628 static void *jackStopStream( void *ptr )
2630 CallbackInfo *info = (CallbackInfo *) ptr;
2631 RtApiJack *object = (RtApiJack *) info->object;
2633 object->stopStream();
2634 pthread_exit( NULL );
2637 bool RtApiJack :: callbackEvent( unsigned long nframes )
2639 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2640 if ( stream_.state == STREAM_CLOSED ) {
2641 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2642 error( RtAudioError::WARNING );
2645 if ( stream_.bufferSize != nframes ) {
2646 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2647 error( RtAudioError::WARNING );
2651 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2652 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2654 // Check if we were draining the stream and signal is finished.
2655 if ( handle->drainCounter > 3 ) {
2656 ThreadHandle threadId;
2658 stream_.state = STREAM_STOPPING;
2659 if ( handle->internalDrain == true )
2660 pthread_create( &threadId, NULL, jackStopStream, info );
2662 pthread_cond_signal( &handle->condition );
2666 // Invoke user callback first, to get fresh output data.
2667 if ( handle->drainCounter == 0 ) {
2668 RtAudioCallback callback = (RtAudioCallback) info->callback;
2669 double streamTime = getStreamTime();
2670 RtAudioStreamStatus status = 0;
2671 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2672 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2673 handle->xrun[0] = false;
2675 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2676 status |= RTAUDIO_INPUT_OVERFLOW;
2677 handle->xrun[1] = false;
2679 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2680 stream_.bufferSize, streamTime, status, info->userData );
2681 if ( cbReturnValue == 2 ) {
2682 stream_.state = STREAM_STOPPING;
2683 handle->drainCounter = 2;
2685 pthread_create( &id, NULL, jackStopStream, info );
2688 else if ( cbReturnValue == 1 ) {
2689 handle->drainCounter = 1;
2690 handle->internalDrain = true;
2694 jack_default_audio_sample_t *jackbuffer;
2695 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2696 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2698 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2700 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2701 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2702 memset( jackbuffer, 0, bufferBytes );
2706 else if ( stream_.doConvertBuffer[0] ) {
2708 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2710 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2711 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2712 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2715 else { // no buffer conversion
2716 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2717 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2718 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2723 // Don't bother draining input
2724 if ( handle->drainCounter ) {
2725 handle->drainCounter++;
2729 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2731 if ( stream_.doConvertBuffer[1] ) {
2732 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2733 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2734 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2736 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2738 else { // no buffer conversion
2739 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2740 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2741 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2747 RtApi::tickStreamTime();
2750 //******************** End of __UNIX_JACK__ *********************//
2753 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2755 // The ASIO API is designed around a callback scheme, so this
2756 // implementation is similar to that used for OS-X CoreAudio and Linux
2757 // Jack. The primary constraint with ASIO is that it only allows
2758 // access to a single driver at a time. Thus, it is not possible to
2759 // have more than one simultaneous RtAudio stream.
2761 // This implementation also requires a number of external ASIO files
2762 // and a few global variables. The ASIO callback scheme does not
2763 // allow for the passing of user data, so we must create a global
2764 // pointer to our callbackInfo structure.
2766 // On unix systems, we make use of a pthread condition variable.
2767 // Since there is no equivalent in Windows, I hacked something based
2768 // on information found in
2769 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2771 #include "asiosys.h"
2773 #include "iasiothiscallresolver.h"
2774 #include "asiodrivers.h"
2777 static AsioDrivers drivers;
2778 static ASIOCallbacks asioCallbacks;
2779 static ASIODriverInfo driverInfo;
2780 static CallbackInfo *asioCallbackInfo;
2781 static bool asioXRun;
2784 int drainCounter; // Tracks callback counts when draining
2785 bool internalDrain; // Indicates if stop is initiated from callback or not.
2786 ASIOBufferInfo *bufferInfos;
2790 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2793 // Function declarations (definitions at end of section)
2794 static const char* getAsioErrorString( ASIOError result );
2795 static void sampleRateChanged( ASIOSampleRate sRate );
2796 static long asioMessages( long selector, long value, void* message, double* opt );
2798 RtApiAsio :: RtApiAsio()
2800 // ASIO cannot run on a multi-threaded appartment. You can call
2801 // CoInitialize beforehand, but it must be for appartment threading
2802 // (in which case, CoInitilialize will return S_FALSE here).
2803 coInitialized_ = false;
2804 HRESULT hr = CoInitialize( NULL );
2806 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2807 error( RtAudioError::WARNING );
2809 coInitialized_ = true;
2811 drivers.removeCurrentDriver();
2812 driverInfo.asioVersion = 2;
2814 // See note in DirectSound implementation about GetDesktopWindow().
2815 driverInfo.sysRef = GetForegroundWindow();
2818 RtApiAsio :: ~RtApiAsio()
2820 if ( stream_.state != STREAM_CLOSED ) closeStream();
2821 if ( coInitialized_ ) CoUninitialize();
2824 unsigned int RtApiAsio :: getDeviceCount( void )
2826 return (unsigned int) drivers.asioGetNumDev();
// Probe ASIO driver number 'device' and return an RtAudio::DeviceInfo
// describing it: channel counts, supported sample rates, preferred rate,
// and the native sample format of its first channel.
// NOTE(review): several structural lines (closing braces and the early
// "return info;" statements that normally follow the error() calls)
// appear to be missing from this extracted chunk — verify against the
// canonical file before building.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;  // stays false unless the probe completes

  // Validate the requested index against the installed-driver count.
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
    // devices_ was populated by saveDeviceInfo() just before the stream opened.
    return devices_[ device ];

  // Look up the driver name for this device index.
  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.name = driverName;

  // Load and initialize the driver so it can be queried.
  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();  // unload before reporting the failure
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  // Duplex capability is limited by the smaller of the two channel counts.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      // Prefer the highest supported rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;  // fall back to an output channel
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Map the ASIO sample type onto the corresponding RtAudio format flag.
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  // Flag default input/output devices.
  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  // Probe complete: unload the driver again.
  drivers.removeCurrentDriver();
2948 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2950 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2951 object->callbackEvent( index );
2954 void RtApiAsio :: saveDeviceInfo( void )
2958 unsigned int nDevices = getDeviceCount();
2959 devices_.resize( nDevices );
2960 for ( unsigned int i=0; i<nDevices; i++ )
2961 devices_[i] = getDeviceInfo( i );
// Configure the ASIO driver for the requested mode/channels/rate/format
// and allocate all stream buffers.  For duplex streams this is called
// twice (OUTPUT first, then INPUT); the second call must target the same
// device and reuses the already-loaded driver.
// NOTE(review): the "goto error;" statements, the trailing "error:"
// cleanup label entry, and the SUCCESS/FAILURE returns that normally
// punctuate this function appear to be missing from this extracted
// chunk — verify against the canonical file.
bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
{////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;

  // For ASIO, a duplex stream MUST use the same driver.
  if ( isDuplexInput && stream_.device[0] != device ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();

  // Only load the driver once for duplex stream.
  if ( !isDuplexInput ) {
    // The getDeviceInfo() function will not work when a stream is open
    // because ASIO does not allow multiple devices to run at the same
    // time.  Thus, we'll probe the system before opening a stream and
    // save the results for use by getDeviceInfo().
    this->saveDeviceInfo();

    if ( !drivers.loadDriver( driverName ) ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
      errorText_ = errorStream_.str();

    result = ASIOInit( &driverInfo );
    if ( result != ASE_OK ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
      errorText_ = errorStream_.str();

  // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
  bool buffersAllocated = false;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  unsigned int nChannels;

  // Check the device channel count.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();

  // Requested channels + offset must fit within what the driver exposes.
  if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
       ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
    errorText_ = errorStream_.str();

  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;

  // Verify the sample rate is supported.
  result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
    errorText_ = errorStream_.str();

  // Get the current sample rate
  ASIOSampleRate currentRate;
  // NOTE(review): "¤tRate" below is mojibake — the original argument
  // "&currentRate" had its "&curren" collapsed into the HTML entity
  // "&curren;" (U+00A4) by an encoding pass.  This will not compile as-is;
  // restore "&currentRate".
  result = ASIOGetSampleRate( ¤tRate );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
    errorText_ = errorStream_.str();

  // Set the sample rate only if necessary
  if ( currentRate != sampleRate ) {
    result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
    if ( result != ASE_OK ) {
      errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
      errorText_ = errorStream_.str();

  // Determine the driver data type.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  if ( mode == OUTPUT ) channelInfo.isInput = false;
  else channelInfo.isInput = true;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
    errorText_ = errorStream_.str();

  // Assuming WINDOWS host is always little-endian.
  stream_.doByteSwap[mode] = false;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = 0;
  // Map the ASIO sample type to an RtAudio format; MSB variants need a
  // byte swap on this (little-endian) host.
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT16;
    if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;

  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT32;
    if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;

  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
    if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;

  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
    if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;

  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
    stream_.deviceFormat[mode] = RTAUDIO_SINT24;
    if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;

  if ( stream_.deviceFormat[mode] == 0 ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
    errorText_ = errorStream_.str();

  // Set the buffer size.  For a duplex stream, this will end up
  // setting the buffer size based on the input constraints, which
  long minSize, maxSize, preferSize, granularity;
  result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
    errorText_ = errorStream_.str();

  if ( isDuplexInput ) {
    // When this is the duplex input (output was opened before), then we have to use the same
    // buffersize as the output, because it might use the preferred buffer size, which most
    // likely wasn't passed as input to this.  The buffer sizes have to be identical anyway,
    // so instead of throwing an error, make them equal.  The caller uses the reference
    // to the "bufferSize" param as usual to set up processing buffers.
    *bufferSize = stream_.bufferSize;

  if ( *bufferSize == 0 ) *bufferSize = preferSize;
  else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
  else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
  else if ( granularity == -1 ) {
    // Make sure bufferSize is a power of two.
    int log2_of_min_size = 0;
    int log2_of_max_size = 0;

    // Find the highest set bit of minSize and maxSize (their log2 floors).
    for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
      if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
      if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;

    // Pick the power of two in [min,max] closest to the requested size.
    long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
    int min_delta_num = log2_of_min_size;

    for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
      long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
      if (current_delta < min_delta) {
        min_delta = current_delta;

    *bufferSize = ( (unsigned int)1 << min_delta_num );
    if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
    else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;

  else if ( granularity != 0 ) {
    // Set to an even multiple of granularity, rounding up.
    *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;

  // we don't use it anymore, see above!
  // Just left it here for the case...
  if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";

  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 2;  // ASIO double-buffers internally

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // ASIO always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Allocate, if necessary, our AsioHandle structure for the stream.
  if ( handle == 0 ) {
    handle = new AsioHandle;

    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";

    handle->bufferInfos = 0;

    // Create a manual-reset event.
    handle->condition = CreateEvent( NULL,   // no security
                                     TRUE,   // manual-reset
                                     FALSE,  // non-signaled initially

    stream_.apiHandle = (void *) handle;

  // Create the ASIO internal buffers.  Since RtAudio sets up input
  // and output separately, we'll have to dispose of previously
  // created output buffers for a duplex stream.
  if ( mode == INPUT && stream_.mode == OUTPUT ) {
    ASIODisposeBuffers();
    if ( handle->bufferInfos ) free( handle->bufferInfos );

  // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
  if ( handle->bufferInfos == NULL ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
    errorText_ = errorStream_.str();

  // Output channel descriptors first, then input channel descriptors.
  ASIOBufferInfo *infos;
  infos = handle->bufferInfos;
  for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
    infos->isInput = ASIOFalse;
    infos->channelNum = i + stream_.channelOffset[0];
    infos->buffers[0] = infos->buffers[1] = 0;

  for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
    infos->isInput = ASIOTrue;
    infos->channelNum = i + stream_.channelOffset[1];
    infos->buffers[0] = infos->buffers[1] = 0;

  // prepare for callbacks
  stream_.sampleRate = sampleRate;
  stream_.device[mode] = device;
  stream_.mode = isDuplexInput ? DUPLEX : mode;

  // store this class instance before registering callbacks, that are going to use it
  asioCallbackInfo = &stream_.callbackInfo;
  stream_.callbackInfo.object = (void *) this;

  // Set up the ASIO callback structure and create the ASIO data buffers.
  asioCallbacks.bufferSwitch = &bufferSwitch;
  asioCallbacks.sampleRateDidChange = &sampleRateChanged;
  asioCallbacks.asioMessage = &asioMessages;
  asioCallbacks.bufferSwitchTimeInfo = NULL;
  result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
  if ( result != ASE_OK ) {
    // Standard method failed.  This can happen with strict/misbehaving drivers that return valid buffer size ranges
    // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
    // In that case, let's be naïve and try that instead.
    *bufferSize = preferSize;
    stream_.bufferSize = *bufferSize;
    result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );

  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
    errorText_ = errorStream_.str();

  buffersAllocated = true;
  stream_.state = STREAM_STOPPED;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate necessary internal buffers
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
    // For duplex input, reuse the output-side device buffer if it is
    // already large enough.
    if ( isDuplexInput && stream_.deviceBuffer ) {
      unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
      if ( bufferBytes <= bytesOut ) makeBuffer = false;

      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";

  // Determine device latencies
  long inputLatency, outputLatency;
  result = ASIOGetLatencies( &inputLatency, &outputLatency );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING); // warn but don't fail

  stream_.latency[0] = outputLatency;
  stream_.latency[1] = inputLatency;

  // Setup the buffer conversion information structure.  We don't use
  // buffers to do channel offsets, so we override that parameter
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  // Error cleanup path (reached via the missing "goto error" jumps).
  if ( !isDuplexInput ) {
    // the cleanup for error in the duplex input, is done by RtApi::openStream
    // So we clean up for single channel only

    if ( buffersAllocated )
      ASIODisposeBuffers();

    drivers.removeCurrentDriver();

      CloseHandle( handle->condition );
      if ( handle->bufferInfos )
        free( handle->bufferInfos );

      stream_.apiHandle = 0;

    if ( stream_.userBuffer[mode] ) {
      free( stream_.userBuffer[mode] );
      stream_.userBuffer[mode] = 0;

    if ( stream_.deviceBuffer ) {
      free( stream_.deviceBuffer );
      stream_.deviceBuffer = 0;

}////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Stop the stream if running, release all ASIO/Win32 resources and
// internal buffers, and mark the stream closed.
// NOTE(review): the ASIOStop() call that normally follows the
// STREAM_RUNNING check appears to be missing from this extracted chunk.
void RtApiAsio :: closeStream()
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  if ( stream_.state == STREAM_RUNNING ) {
    stream_.state = STREAM_STOPPED;

  // Release driver-side buffers and unload the driver.
  ASIODisposeBuffers();
  drivers.removeCurrentDriver();

  // Free our per-stream Win32 event and buffer descriptors.
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
    CloseHandle( handle->condition );
    if ( handle->bufferInfos )
      free( handle->bufferInfos );

    stream_.apiHandle = 0;

  // Free the user-side buffers for both directions.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Set when a stop has been requested from the callback thread; cleared
// each time the stream is (re)started.
bool stopThreadCalled = false;

// Start the ASIO driver and transition the stream to STREAM_RUNNING.
// NOTE(review): the "#endif" matching the HAVE_GETTIMEOFDAY "#if", and
// the early "return" after the already-running warning, appear to be
// missing from this extracted chunk.
void RtApiAsio :: startStream()
  RtApi::startStream();
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiAsio::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

#if defined( HAVE_GETTIMEOFDAY )
  // Reset the stream-time reference point.
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  ASIOError result = ASIOStart();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
    errorText_ = errorStream_.str();

  // Fresh start: no drain in progress, condition event unsignaled.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  ResetEvent( handle->condition );
  stream_.state = STREAM_RUNNING;

  stopThreadCalled = false;

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, first letting any pending output drain: drainCounter
// is set to 2 and the callback signals handle->condition once the
// zero-filled buffers have been emitted.
void RtApiAsio :: stopStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      WaitForSingleObject( handle->condition, INFINITE );  // block until signaled

  stream_.state = STREAM_STOPPED;

  ASIOError result = ASIOStop();
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
    errorText_ = errorStream_.str();

  if ( result == ASE_OK ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Abort is intentionally identical to stop for ASIO — see the comment
// below about residual sound from non-drained device buffers.
void RtApiAsio :: abortStream()
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  // The following lines were commented-out because some behavior was
  // noted where the device buffers need to be zeroed to avoid
  // continuing sound, even when the device buffers are completely
  // disposed.  So now, calling abort is the same as calling stop.
  // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
  // handle->drainCounter = 2;
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted.  It is necessary to handle it this way because the
// callbackEvent() function must return before the ASIOStop()
// function will return.
// NOTE(review): the _endthreadex()/return lines that normally end this
// thread routine are not visible in this extracted chunk.
static unsigned __stdcall asioStopStream( void *ptr )
  CallbackInfo *info = (CallbackInfo *) ptr;
  RtApiAsio *object = (RtApiAsio *) info->object;

  object->stopStream();
// Per-buffer processing invoked from bufferSwitch(): runs the user
// callback, converts/byte-swaps between user and device formats, and
// copies data to/from the ASIO half-buffer selected by bufferIndex.
// Also drives the drain/stop protocol via handle->drainCounter.
// NOTE(review): a few lines (e.g. the "unsigned threadId;" declaration
// used by _beginthreadex, the ASIOOutputReady() call near the end, and
// several returns/braces) are not visible in this extracted chunk.
bool RtApiAsio :: callbackEvent( long bufferIndex )
  if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
    error( RtAudioError::WARNING );

  CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
  AsioHandle *handle = (AsioHandle *) stream_.apiHandle;

  // Check if we were draining the stream and signal if finished.
  if ( handle->drainCounter > 3 ) {

    stream_.state = STREAM_STOPPING;
    if ( handle->internalDrain == false )
      SetEvent( handle->condition );  // wake the thread blocked in stopStream()
    else { // spawn a thread to stop the stream
      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );

  // Invoke user callback to get fresh output data UNLESS we are
  if ( handle->drainCounter == 0 ) {
    RtAudioCallback callback = (RtAudioCallback) info->callback;
    double streamTime = getStreamTime();
    RtAudioStreamStatus status = 0;
    // Report any over/underflow flagged by the asioMessages() handler.
    if ( stream_.mode != INPUT && asioXRun == true ) {
      status |= RTAUDIO_OUTPUT_UNDERFLOW;

    if ( stream_.mode != OUTPUT && asioXRun == true ) {
      status |= RTAUDIO_INPUT_OVERFLOW;

    int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
                                  stream_.bufferSize, streamTime, status, info->userData );
    // Return value 2 = abort immediately, 1 = drain then stop.
    if ( cbReturnValue == 2 ) {
      stream_.state = STREAM_STOPPING;
      handle->drainCounter = 2;

      stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
                                                    &stream_.callbackInfo, 0, &threadId );

    else if ( cbReturnValue == 1 ) {
      handle->drainCounter = 1;
      handle->internalDrain = true;

  unsigned int nChannels, bufferBytes, i, j;
  nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );

    if ( handle->drainCounter > 1 ) { // write zeros to the output stream

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );

    else if ( stream_.doConvertBuffer[0] ) {

      // Convert user data to the device format, then de-interleave into
      // the per-channel ASIO buffers.
      convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[0],
                        stream_.deviceFormat[0] );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );

      // No conversion needed: copy user buffer channels straight out.
      if ( stream_.doByteSwap[0] )
        byteSwapBuffer( stream_.userBuffer[0],
                        stream_.bufferSize * stream_.nUserChannels[0],
                        stream_.userFormat );

      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput != ASIOTrue )
          memcpy( handle->bufferInfos[i].buffers[bufferIndex],
                  &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );

  // Don't bother draining input
  if ( handle->drainCounter ) {
    handle->drainCounter++;

  if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

    bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);

    if (stream_.doConvertBuffer[1]) {

      // Always interleave ASIO input data.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue )
          memcpy( &stream_.deviceBuffer[j++*bufferBytes],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.deviceBuffer,
                        stream_.bufferSize * stream_.nDeviceChannels[1],
                        stream_.deviceFormat[1] );
      convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

      // No conversion needed: copy device channels straight into the user buffer.
      for ( i=0, j=0; i<nChannels; i++ ) {
        if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
          memcpy( &stream_.userBuffer[1][bufferBytes*j++],
                  handle->bufferInfos[i].buffers[bufferIndex],

      if ( stream_.doByteSwap[1] )
        byteSwapBuffer( stream_.userBuffer[1],
                        stream_.bufferSize * stream_.nUserChannels[1],
                        stream_.userFormat );

  // The following call was suggested by Malte Clasen.  While the API
  // documentation indicates it should not be required, some device
  // drivers apparently do not function correctly without it.

  RtApi::tickStreamTime();
// Driver notification that the hardware sample rate changed.  The
// stream cannot continue at a different rate, so it is stopped.
// NOTE(review): the "try {" opening the try/catch around stopStream()
// is not visible in this extracted chunk.
static void sampleRateChanged( ASIOSampleRate sRate )
  // The ASIO documentation says that this usually only happens during
  // external sync.  Audio processing is not stopped by the driver,
  // actual sample rate might not have even changed, maybe only the
  // sample rate status of an AES/EBU or S/PDIF digital input at the

  RtApi *object = (RtApi *) asioCallbackInfo->object;

  object->stopStream();

  catch ( RtAudioError &exception ) {
    std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;

  std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// Host callback through which the ASIO driver queries host capabilities
// and posts notifications (reset, resync, latency change, ...).
// NOTE(review): the "ret = 1;"/"break;"/"return ret;" lines between the
// cases are not visible in this extracted chunk.
static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
  switch( selector ) {
  case kAsioSelectorSupported:
    if ( value == kAsioResetRequest
         || value == kAsioEngineVersion
         || value == kAsioResyncRequest
         || value == kAsioLatenciesChanged
         // The following three were added for ASIO 2.0, you don't
         // necessarily have to support them.
         || value == kAsioSupportsTimeInfo
         || value == kAsioSupportsTimeCode
         || value == kAsioSupportsInputMonitor)

  case kAsioResetRequest:
    // Defer the task and perform the reset of the driver during the
    // next "safe" situation.  You cannot reset the driver right now,
    // as this code is called from the driver.  Resetting the driver
    // means destroying it completely — i.e. ASIOStop(),
    // ASIODisposeBuffers(), destruction — after which you initialize the
    std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;

  case kAsioResyncRequest:
    // This informs the application that the driver encountered some
    // non-fatal data loss.  It is used for synchronization purposes
    // of different media.  Added mainly to work around the Win16Mutex
    // problems in Windows 95/98 with the Windows Multimedia system,
    // which could lose data because the Mutex was held too long by
    // another thread.  However a driver can issue it in other
    // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;

  case kAsioLatenciesChanged:
    // This will inform the host application that the driver's
    // latencies changed.  Beware, this does not mean that the
    // buffer sizes have changed!  You might need to update internal
    std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;

  case kAsioEngineVersion:
    // Return the supported ASIO version of the host application.  If
    // a host application does not implement this selector, ASIO 1.0
    // is assumed by the driver.

  case kAsioSupportsTimeInfo:
    // Informs the driver whether the
    // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
    // For compatibility with ASIO 1.0 drivers the host application
    // should always support the "old" bufferSwitch method, too.

  case kAsioSupportsTimeCode:
    // Informs the driver whether application is interested in time
    // code info.  If an application does not need to know about time
    // code, the driver has less work to do.
3732 static const char* getAsioErrorString( ASIOError result )
3740 static const Messages m[] =
3742 { ASE_NotPresent, "Hardware input or output is not present or available." },
3743 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3744 { ASE_InvalidParameter, "Invalid input parameter." },
3745 { ASE_InvalidMode, "Invalid mode." },
3746 { ASE_SPNotAdvancing, "Sample position not advancing." },
3747 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3748 { ASE_NoMemory, "Not enough memory to complete the request." }
3751 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3752 if ( m[i].value == result ) return m[i].message;
3754 return "Unknown error.";
3757 //******************** End of __WINDOWS_ASIO__ *********************//
3761 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3763 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3764 // - Introduces support for the Windows WASAPI API
3765 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3766 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3767 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3774 #include <mferror.h>
3776 #include <mftransform.h>
3777 #include <wmcodecdsp.h>
3779 #include <audioclient.h>
3781 #include <mmdeviceapi.h>
3782 #include <FunctionDiscoveryKeys_devpkey.h>
3784 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3785 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3788 #ifndef MFSTARTUP_NOSOCKET
3789 #define MFSTARTUP_NOSOCKET 0x1
3793 #pragma comment( lib, "ksuser" )
3794 #pragma comment( lib, "mfplat.lib" )
3795 #pragma comment( lib, "mfuuid.lib" )
3796 #pragma comment( lib, "wmcodecdspuuid" )
3799 //=============================================================================
3801 #define SAFE_RELEASE( objectPtr )\
3804 objectPtr->Release();\
3808 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3810 //-----------------------------------------------------------------------------
3812 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3813 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3814 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3815 // provide intermediate storage for read / write synchronization.
3829 // sets the length of the internal ring buffer
// Allocate zero-initialized storage for the ring buffer ( bufferSize samples of
// formatBytes bytes each ) and record the buffer length, in samples, for the
// index arithmetic in pushBuffer() / pullBuffer().
// NOTE(review): any previously allocated buffer_ must be freed before this
// calloc — the free is not visible in this view; confirm against full source.
3830 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3833 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3835 bufferSize_ = bufferSize;
3840 // attempt to push a buffer into the ring buffer at the current "in" index
// Copy bufferSize samples from the caller's buffer into the ring buffer at the
// current "in" index. Returns false (without copying) when the input is invalid
// or there is not enough free space ahead of the "out" index; otherwise the
// "in" index is advanced (mod bufferSize_).
3841 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3843 if ( !buffer || // incoming buffer is NULL
3844 bufferSize == 0 || // incoming buffer has no data
3845 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below can be done with plain
// comparisons: if the incoming region wraps past the end of the ring while the
// "out" index sits before "in", shift "out" up by one buffer length.
3850 unsigned int relOutIndex = outIndex_;
3851 unsigned int inIndexEnd = inIndex_ + bufferSize;
3852 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3853 relOutIndex += bufferSize_;
3856 // the "IN" index CAN BEGIN at the "OUT" index
3857 // the "IN" index CANNOT END at the "OUT" index
3858 if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
3859 return false; // not enough space between "in" index and "out" index
3862 // copy buffer from external to internal
// fromZeroSize = number of samples that wrap around to the start of the ring;
// fromInSize = number of samples copied at the current "in" position.
3863 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3864 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3865 int fromInSize = bufferSize - fromZeroSize;
// Per-format copies: the buffer is indexed through a pointer of the sample's
// element type so that inIndex_ / sizes stay in samples, not bytes.
3870 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3871 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3873 case RTAUDIO_SINT16:
3874 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3875 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3877 case RTAUDIO_SINT24:
3878 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3879 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3881 case RTAUDIO_SINT32:
3882 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3883 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3885 case RTAUDIO_FLOAT32:
3886 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3887 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3889 case RTAUDIO_FLOAT64:
3890 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3891 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3895 // update "in" index
3896 inIndex_ += bufferSize;
3897 inIndex_ %= bufferSize_;
3902 // attempt to pull a buffer from the ring buffer from the current "out" index
// Copy bufferSize samples out of the ring buffer at the current "out" index
// into the caller's buffer. Returns false when the input is invalid or fewer
// than bufferSize samples are available; otherwise the "out" index is advanced
// (mod bufferSize_). Mirror image of pushBuffer() with the boundary rule
// reversed: "out" may catch up to "in" exactly, but may not start there.
3903 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3905 if ( !buffer || // incoming buffer is NULL
3906 bufferSize == 0 || // incoming buffer has no data
3907 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "in" index so the availability test can use plain comparisons
// when the requested region wraps past the end of the ring.
3912 unsigned int relInIndex = inIndex_;
3913 unsigned int outIndexEnd = outIndex_ + bufferSize;
3914 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3915 relInIndex += bufferSize_;
3918 // the "OUT" index CANNOT BEGIN at the "IN" index
3919 // the "OUT" index CAN END at the "IN" index
3920 if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
3921 return false; // not enough space between "out" index and "in" index
3924 // copy buffer from internal to external
// fromZeroSize = samples wrapping around to the start of the ring;
// fromOutSize = samples copied from the current "out" position.
3925 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3926 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3927 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copies, indexed in samples via the element type (see pushBuffer).
3932 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3933 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3935 case RTAUDIO_SINT16:
3936 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3937 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3939 case RTAUDIO_SINT24:
3940 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3941 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3943 case RTAUDIO_SINT32:
3944 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3945 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3947 case RTAUDIO_FLOAT32:
3948 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3949 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3951 case RTAUDIO_FLOAT64:
3952 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3953 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3957 // update "out" index
3958 outIndex_ += bufferSize;
3959 outIndex_ %= bufferSize_;
// Ring-buffer state: total capacity in samples, plus the producer ("in") and
// consumer ("out") sample indices used by pushBuffer() / pullBuffer().
3966 unsigned int bufferSize_;
3967 unsigned int inIndex_;
3968 unsigned int outIndex_;
3971 //-----------------------------------------------------------------------------
3973 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3974 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3975 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
3976 class WasapiResampler
// Construct a Media Foundation based sample-rate converter.
// isFloat/bitsPerSample/channelCount describe the (fixed) sample format;
// inSampleRate -> outSampleRate is the conversion performed by Convert().
3979 WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
3980 unsigned int inSampleRate, unsigned int outSampleRate )
3981 : _bytesPerSample( bitsPerSample / 8 )
3982 , _channelCount( channelCount )
3983 , _sampleRatio( ( float ) outSampleRate / inSampleRate )
3984 , _transformUnk( NULL )
3985 , _transform( NULL )
3986 , _mediaType( NULL )
3987 , _inputMediaType( NULL )
3988 , _outputMediaType( NULL )
3990 #ifdef __IWMResamplerProps_FWD_DEFINED__
3991 , _resamplerProps( NULL )
3994 // 1. Initialization
// Start Media Foundation; MFSTARTUP_NOSOCKET skips the network features.
3996 MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );
3998 // 2. Create Resampler Transform Object
// Instantiate the built-in Windows audio resampler DSP as an MFT.
4000 CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
4001 IID_IUnknown, ( void** ) &_transformUnk );
4003 _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
4005 #ifdef __IWMResamplerProps_FWD_DEFINED__
4006 _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
4007 _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality
4010 // 3. Specify input / output format
// Build a prototype media type describing the PCM/float stream, then copy it
// into separate input and output types that differ only in sample rate.
4012 MFCreateMediaType( &_mediaType );
4013 _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
4014 _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
4015 _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
4016 _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
4017 _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
4018 _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
4019 _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
4020 _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );
4022 MFCreateMediaType( &_inputMediaType );
4023 _mediaType->CopyAllItems( _inputMediaType );
4025 _transform->SetInputType( 0, _inputMediaType, 0 );
4027 MFCreateMediaType( &_outputMediaType );
4028 _mediaType->CopyAllItems( _outputMediaType );
// Output type: same format, but at the target sample rate.
4030 _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
4031 _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );
4033 _transform->SetOutputType( 0, _outputMediaType, 0 );
4035 // 4. Send stream start messages to Resampler
4037 _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
4038 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
4039 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
// Destructor body: notify the resampler MFT that the stream is ending, then
// release all COM references acquired in the constructor.
// NOTE(review): the matching MFShutdown() call is not visible in this view —
// confirm Media Foundation is shut down in the full source.
4044 // 8. Send stream stop messages to Resampler
4046 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
4047 _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );
4053 SAFE_RELEASE( _transformUnk );
4054 SAFE_RELEASE( _transform );
4055 SAFE_RELEASE( _mediaType );
4056 SAFE_RELEASE( _inputMediaType );
4057 SAFE_RELEASE( _outputMediaType );
4059 #ifdef __IWMResamplerProps_FWD_DEFINED__
4060 SAFE_RELEASE( _resamplerProps );
// Convert inSampleCount frames from inBuffer through the resampler MFT into
// outBuffer, reporting the number of frames produced via outSampleCount.
// outBuffer must be large enough for ceil( input * _sampleRatio ) + 1 frame.
4064 void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
4066 unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
4067 if ( _sampleRatio == 1 )
4069 // no sample rate conversion required
4070 memcpy( outBuffer, inBuffer, inputBufferSize );
4071 outSampleCount = inSampleCount;
// Worst-case output size: scaled input plus one extra frame of headroom for
// rounding inside the resampler.
4075 unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );
4077 IMFMediaBuffer* rInBuffer;
4078 IMFSample* rInSample;
4079 BYTE* rInByteBuffer = NULL;
4081 // 5. Create Sample object from input data
// Wrap the caller's bytes in an IMFSample (lock, copy, unlock).
4083 MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );
4085 rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
4086 memcpy( rInByteBuffer, inBuffer, inputBufferSize );
4087 rInBuffer->Unlock();
4088 rInByteBuffer = NULL;
4090 rInBuffer->SetCurrentLength( inputBufferSize );
4092 MFCreateSample( &rInSample );
4093 rInSample->AddBuffer( rInBuffer );
4095 // 6. Pass input data to Resampler
4097 _transform->ProcessInput( 0, rInSample, 0 );
4099 SAFE_RELEASE( rInBuffer );
4100 SAFE_RELEASE( rInSample );
4102 // 7. Perform sample rate conversion
4104 IMFMediaBuffer* rOutBuffer = NULL;
4105 BYTE* rOutByteBuffer = NULL;
4107 MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;
4109 DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput
4111 // 7.1 Create Sample object for output data
4113 memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
4114 MFCreateSample( &( rOutDataBuffer.pSample ) );
4115 MFCreateMemoryBuffer( rBytes, &rOutBuffer );
4116 rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
4117 rOutDataBuffer.dwStreamID = 0;
4118 rOutDataBuffer.dwStatus = 0;
4119 rOutDataBuffer.pEvents = NULL;
4121 // 7.2 Get output data from Resampler
// MF_E_TRANSFORM_NEED_MORE_INPUT is not an error: the MFT simply has no full
// output frame yet, so release the output objects and return early.
4123 if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
4126 SAFE_RELEASE( rOutBuffer );
4127 SAFE_RELEASE( rOutDataBuffer.pSample );
4131 // 7.3 Write output data to outBuffer
// Re-acquire the (possibly re-packed) sample as one contiguous buffer, then
// copy the produced bytes out and convert bytes -> frames for the caller.
4133 SAFE_RELEASE( rOutBuffer );
4134 rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
4135 rOutBuffer->GetCurrentLength( &rBytes );
4137 rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
4138 memcpy( outBuffer, rOutByteBuffer, rBytes );
4139 rOutBuffer->Unlock();
4140 rOutByteBuffer = NULL;
4142 outSampleCount = rBytes / _bytesPerSample / _channelCount;
4143 SAFE_RELEASE( rOutBuffer );
4144 SAFE_RELEASE( rOutDataBuffer.pSample );
// Fixed stream format (set once in the constructor).
4148 unsigned int _bytesPerSample;
4149 unsigned int _channelCount;
// COM references owned by this object; released in the destructor.
4152 IUnknown* _transformUnk;
4153 IMFTransform* _transform;
4154 IMFMediaType* _mediaType;
4155 IMFMediaType* _inputMediaType;
4156 IMFMediaType* _outputMediaType;
4158 #ifdef __IWMResamplerProps_FWD_DEFINED__
4159 IWMResamplerProps* _resamplerProps;
4163 //-----------------------------------------------------------------------------
4165 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI COM interfaces and event handles; stored in
// stream_.apiHandle and torn down in RtApiWasapi::closeStream().
4168 IAudioClient* captureAudioClient;
4169 IAudioClient* renderAudioClient;
4170 IAudioCaptureClient* captureClient;
4171 IAudioRenderClient* renderClient;
4172 HANDLE captureEvent;
// Default-construct with every handle/interface cleared so closeStream() can
// safely release/close whichever members were actually populated.
4176 : captureAudioClient( NULL ),
4177 renderAudioClient( NULL ),
4178 captureClient( NULL ),
4179 renderClient( NULL ),
4180 captureEvent( NULL ),
4181 renderEvent( NULL ) {}
4184 //=============================================================================
// Constructor: initialize COM for this thread (remembering whether we did, so
// the destructor only calls CoUninitialize when balanced) and create the
// MMDevice enumerator used by all device queries.
4186 RtApiWasapi::RtApiWasapi()
4187 : coInitialized_( false ), deviceEnumerator_( NULL )
4189 // WASAPI can run either apartment or multi-threaded
4190 HRESULT hr = CoInitialize( NULL );
4191 if ( !FAILED( hr ) )
4192 coInitialized_ = true;
4194 // Instantiate device enumerator
4195 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
4196 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
4197 ( void** ) &deviceEnumerator_ );
4199 // If this runs on an old Windows, it will fail. Ignore and proceed.
// Enumeration failure is tolerated: deviceEnumerator_ stays NULL and later
// calls (e.g. getDeviceCount) bail out on the NULL check.
4201 deviceEnumerator_ = NULL;
4204 //-----------------------------------------------------------------------------
// Destructor: close any open stream, drop the device enumerator, and balance
// the constructor's CoInitialize (only if it succeeded there).
4206 RtApiWasapi::~RtApiWasapi()
4208 if ( stream_.state != STREAM_CLOSED )
4211 SAFE_RELEASE( deviceEnumerator_ );
4213 // If this object previously called CoInitialize()
4214 if ( coInitialized_ )
4218 //=============================================================================
// Return the total number of active WASAPI endpoints: capture devices plus
// render devices. On any COM failure errorText_ is set and error() is raised
// after the collections are released.
4220 unsigned int RtApiWasapi::getDeviceCount( void )
4222 unsigned int captureDeviceCount = 0;
4223 unsigned int renderDeviceCount = 0;
4225 IMMDeviceCollection* captureDevices = NULL;
4226 IMMDeviceCollection* renderDevices = NULL;
// Enumerator may be NULL if CoCreateInstance failed in the constructor.
4228 if ( !deviceEnumerator_ )
4231 // Count capture devices
4233 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4234 if ( FAILED( hr ) ) {
4235 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
4239 hr = captureDevices->GetCount( &captureDeviceCount );
4240 if ( FAILED( hr ) ) {
4241 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4245 // Count render devices
4246 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4247 if ( FAILED( hr ) ) {
4248 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4252 hr = renderDevices->GetCount( &renderDeviceCount );
4253 if ( FAILED( hr ) ) {
4254 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4259 // release all references
4260 SAFE_RELEASE( captureDevices );
4261 SAFE_RELEASE( renderDevices );
4263 if ( errorText_.empty() )
4264 return captureDeviceCount + renderDeviceCount;
4266 error( RtAudioError::DRIVER_ERROR );
4270 //-----------------------------------------------------------------------------
// Probe device `device` and fill an RtAudio::DeviceInfo. Device indices span
// render devices first ([0, renderCount)) then capture devices — see the
// `device >= renderDeviceCount` branch below. Error paths set errorText_ /
// errorType and fall through to the shared cleanup at the end.
4272 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4274 RtAudio::DeviceInfo info;
4275 unsigned int captureDeviceCount = 0;
4276 unsigned int renderDeviceCount = 0;
4277 std::string defaultDeviceName;
4278 bool isCaptureDevice = false;
4280 PROPVARIANT deviceNameProp;
4281 PROPVARIANT defaultDeviceNameProp;
4283 IMMDeviceCollection* captureDevices = NULL;
4284 IMMDeviceCollection* renderDevices = NULL;
4285 IMMDevice* devicePtr = NULL;
4286 IMMDevice* defaultDevicePtr = NULL;
4287 IAudioClient* audioClient = NULL;
4288 IPropertyStore* devicePropStore = NULL;
4289 IPropertyStore* defaultDevicePropStore = NULL;
4291 WAVEFORMATEX* deviceFormat = NULL;
4292 WAVEFORMATEX* closestMatchFormat = NULL;
// Assume failure until the probe completes.
4295 info.probed = false;
4297 // Count capture devices
4299 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4300 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4301 if ( FAILED( hr ) ) {
4302 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4306 hr = captureDevices->GetCount( &captureDeviceCount );
4307 if ( FAILED( hr ) ) {
4308 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4312 // Count render devices
4313 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4314 if ( FAILED( hr ) ) {
4315 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4319 hr = renderDevices->GetCount( &renderDeviceCount );
4320 if ( FAILED( hr ) ) {
4321 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4325 // validate device index
4326 if ( device >= captureDeviceCount + renderDeviceCount ) {
4327 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4328 errorType = RtAudioError::INVALID_USE;
4332 // determine whether index falls within capture or render devices
4333 if ( device >= renderDeviceCount ) {
4334 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4335 if ( FAILED( hr ) ) {
4336 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4339 isCaptureDevice = true;
4342 hr = renderDevices->Item( device, &devicePtr );
4343 if ( FAILED( hr ) ) {
4344 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4347 isCaptureDevice = false;
4350 // get default device name
// Default-device detection is by name comparison against the eConsole default
// endpoint of the matching data-flow direction.
4351 if ( isCaptureDevice ) {
4352 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4353 if ( FAILED( hr ) ) {
4354 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4359 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4360 if ( FAILED( hr ) ) {
4361 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4366 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4367 if ( FAILED( hr ) ) {
4368 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4371 PropVariantInit( &defaultDeviceNameProp );
4373 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4374 if ( FAILED( hr ) ) {
4375 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4379 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
// Friendly name of the probed device itself.
4382 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4383 if ( FAILED( hr ) ) {
4384 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4388 PropVariantInit( &deviceNameProp );
4390 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4391 if ( FAILED( hr ) ) {
4392 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4396 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
4399 if ( isCaptureDevice ) {
4400 info.isDefaultInput = info.name == defaultDeviceName;
4401 info.isDefaultOutput = false;
4404 info.isDefaultInput = false;
4405 info.isDefaultOutput = info.name == defaultDeviceName;
// Query the shared-mode mix format for channel count and sample format info.
4409 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4410 if ( FAILED( hr ) ) {
4411 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4415 hr = audioClient->GetMixFormat( &deviceFormat );
4416 if ( FAILED( hr ) ) {
4417 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
4421 if ( isCaptureDevice ) {
4422 info.inputChannels = deviceFormat->nChannels;
4423 info.outputChannels = 0;
4424 info.duplexChannels = 0;
4427 info.inputChannels = 0;
4428 info.outputChannels = deviceFormat->nChannels;
4429 info.duplexChannels = 0;
4433 info.sampleRates.clear();
4435 // allow support for all sample rates as we have a built-in sample rate converter
4436 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
4437 info.sampleRates.push_back( SAMPLE_RATES[i] );
4439 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
// Map the WAVEFORMAT(EXTENSIBLE) tag + bit depth onto RtAudio format flags.
4442 info.nativeFormats = 0;
4444 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4445 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4446 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4448 if ( deviceFormat->wBitsPerSample == 32 ) {
4449 info.nativeFormats |= RTAUDIO_FLOAT32;
4451 else if ( deviceFormat->wBitsPerSample == 64 ) {
4452 info.nativeFormats |= RTAUDIO_FLOAT64;
4455 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4456 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4457 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4459 if ( deviceFormat->wBitsPerSample == 8 ) {
4460 info.nativeFormats |= RTAUDIO_SINT8;
4462 else if ( deviceFormat->wBitsPerSample == 16 ) {
4463 info.nativeFormats |= RTAUDIO_SINT16;
4465 else if ( deviceFormat->wBitsPerSample == 24 ) {
4466 info.nativeFormats |= RTAUDIO_SINT24;
4468 else if ( deviceFormat->wBitsPerSample == 32 ) {
4469 info.nativeFormats |= RTAUDIO_SINT32;
4477 // release all references
// Shared cleanup target for all error paths above.
4478 PropVariantClear( &deviceNameProp );
4479 PropVariantClear( &defaultDeviceNameProp );
4481 SAFE_RELEASE( captureDevices );
4482 SAFE_RELEASE( renderDevices );
4483 SAFE_RELEASE( devicePtr );
4484 SAFE_RELEASE( defaultDevicePtr );
4485 SAFE_RELEASE( audioClient );
4486 SAFE_RELEASE( devicePropStore );
4487 SAFE_RELEASE( defaultDevicePropStore );
// Mix formats are allocated by COM and must be freed with CoTaskMemFree.
4489 CoTaskMemFree( deviceFormat );
4490 CoTaskMemFree( closestMatchFormat );
4492 if ( !errorText_.empty() )
4497 //-----------------------------------------------------------------------------
// Linear scan for the device whose probed info reports isDefaultOutput.
// Note: this re-probes every device, so it is O(n) COM round trips.
4499 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4501 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4502 if ( getDeviceInfo( i ).isDefaultOutput ) {
4510 //-----------------------------------------------------------------------------
// Linear scan for the device whose probed info reports isDefaultInput.
// Mirrors getDefaultOutputDevice() above.
4512 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4514 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4515 if ( getDeviceInfo( i ).isDefaultInput ) {
4523 //-----------------------------------------------------------------------------
// Release every per-stream resource: WASAPI COM interfaces, event handles,
// the WasapiHandle itself, and the user/device conversion buffers. A close on
// an already-closed stream is a warning, not an error.
4525 void RtApiWasapi::closeStream( void )
4527 if ( stream_.state == STREAM_CLOSED ) {
4528 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4529 error( RtAudioError::WARNING );
// Stop the stream first if it is still running.
4533 if ( stream_.state != STREAM_STOPPED )
4536 // clean up stream memory
4537 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4538 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4540 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4541 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
4543 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4544 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4546 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4547 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4549 delete ( WasapiHandle* ) stream_.apiHandle;
4550 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0 = output, 1 = input).
4552 for ( int i = 0; i < 2; i++ ) {
4553 if ( stream_.userBuffer[i] ) {
4554 free( stream_.userBuffer[i] );
4555 stream_.userBuffer[i] = 0;
4559 if ( stream_.deviceBuffer ) {
4560 free( stream_.deviceBuffer );
4561 stream_.deviceBuffer = 0;
4564 // update stream state
4565 stream_.state = STREAM_CLOSED;
4568 //-----------------------------------------------------------------------------
// Start the stream: mark it RUNNING and spawn the WASAPI processing thread.
// The thread is created suspended so its priority can be applied before any
// audio work begins, then resumed.
4570 void RtApiWasapi::startStream( void )
4573 RtApi::startStream();
4575 if ( stream_.state == STREAM_RUNNING ) {
4576 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4577 error( RtAudioError::WARNING );
4581 #if defined( HAVE_GETTIMEOFDAY )
4582 gettimeofday( &stream_.lastTickTimestamp, NULL );
4585 // update stream state
4586 stream_.state = STREAM_RUNNING;
4588 // create WASAPI stream thread
4589 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4591 if ( !stream_.callbackInfo.thread ) {
4592 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4593 error( RtAudioError::THREAD_ERROR );
4596 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4597 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4601 //-----------------------------------------------------------------------------
// Graceful stop: flag STREAM_STOPPING, spin until the stream thread reports
// STREAM_STOPPED, give the final buffer time to play out, then close the
// thread handle.
4603 void RtApiWasapi::stopStream( void )
4607 if ( stream_.state == STREAM_STOPPED ) {
4608 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4609 error( RtAudioError::WARNING );
4613 // inform stream thread by setting stream state to STREAM_STOPPING
4614 stream_.state = STREAM_STOPPING;
4616 // wait until stream thread is stopped
// Busy-wait; the stream thread transitions the state when it exits its loop.
4617 while( stream_.state != STREAM_STOPPED ) {
4621 // Wait for the last buffer to play before stopping.
// Sleep duration = one buffer period in milliseconds.
4622 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4624 // close thread handle
4625 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4626 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4627 error( RtAudioError::THREAD_ERROR );
4631 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4634 //-----------------------------------------------------------------------------
// Immediate stop: identical to stopStream() except it does not wait for the
// final buffer to play out before closing the thread handle.
4636 void RtApiWasapi::abortStream( void )
4640 if ( stream_.state == STREAM_STOPPED ) {
4641 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4642 error( RtAudioError::WARNING );
4646 // inform stream thread by setting stream state to STREAM_STOPPING
4647 stream_.state = STREAM_STOPPING;
4649 // wait until stream thread is stopped
4650 while ( stream_.state != STREAM_STOPPED ) {
4654 // close thread handle
4655 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4656 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4657 error( RtAudioError::THREAD_ERROR );
4661 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4664 //-----------------------------------------------------------------------------
// Open device `device` for `mode` (INPUT/OUTPUT): activate the appropriate
// IAudioClient, record device format/latency, and set up stream_ bookkeeping
// (conversion flags, user buffers, callback priority). Returns SUCCESS or
// FAILURE; error paths set errorText_/errorType and fall through to the
// shared cleanup, which closes the stream on FAILURE. Device indices span
// render devices first, then capture devices (matching getDeviceInfo()).
4666 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4667 unsigned int firstChannel, unsigned int sampleRate,
4668 RtAudioFormat format, unsigned int* bufferSize,
4669 RtAudio::StreamOptions* options )
4671 bool methodResult = FAILURE;
4672 unsigned int captureDeviceCount = 0;
4673 unsigned int renderDeviceCount = 0;
4675 IMMDeviceCollection* captureDevices = NULL;
4676 IMMDeviceCollection* renderDevices = NULL;
4677 IMMDevice* devicePtr = NULL;
4678 WAVEFORMATEX* deviceFormat = NULL;
4679 unsigned int bufferBytes;
4680 stream_.state = STREAM_STOPPED;
4682 // create API Handle if not already created
// The same WasapiHandle is shared by both directions of a duplex stream.
4683 if ( !stream_.apiHandle )
4684 stream_.apiHandle = ( void* ) new WasapiHandle();
4686 // Count capture devices
4688 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4689 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4690 if ( FAILED( hr ) ) {
4691 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4695 hr = captureDevices->GetCount( &captureDeviceCount );
4696 if ( FAILED( hr ) ) {
4697 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4701 // Count render devices
4702 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4703 if ( FAILED( hr ) ) {
4704 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4708 hr = renderDevices->GetCount( &renderDeviceCount );
4709 if ( FAILED( hr ) ) {
4710 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4714 // validate device index
4715 if ( device >= captureDeviceCount + renderDeviceCount ) {
4716 errorType = RtAudioError::INVALID_USE;
4717 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4721 // if device index falls within capture devices
4722 if ( device >= renderDeviceCount ) {
// A true capture endpoint can only be opened for INPUT.
4723 if ( mode != INPUT ) {
4724 errorType = RtAudioError::INVALID_USE;
4725 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4729 // retrieve captureAudioClient from devicePtr
4730 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4732 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4733 if ( FAILED( hr ) ) {
4734 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4738 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4739 NULL, ( void** ) &captureAudioClient );
4740 if ( FAILED( hr ) ) {
4741 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";
4745 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4746 if ( FAILED( hr ) ) {
4747 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";
4751 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4752 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4755 // if device index falls within render devices and is configured for loopback
// Loopback capture of a render endpoint: the render client must exist first,
// so recursively open the device for OUTPUT, then activate a second audio
// client (stored in captureAudioClient) on the same render endpoint.
4756 if ( device < renderDeviceCount && mode == INPUT )
4758 // if renderAudioClient is not initialised, initialise it now
4759 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4760 if ( !renderAudioClient )
4762 probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );
4765 // retrieve captureAudioClient from devicePtr
4766 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4768 hr = renderDevices->Item( device, &devicePtr );
4769 if ( FAILED( hr ) ) {
4770 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4774 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4775 NULL, ( void** ) &captureAudioClient );
4776 if ( FAILED( hr ) ) {
4777 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4781 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4782 if ( FAILED( hr ) ) {
4783 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4787 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4788 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4791 // if device index falls within render devices and is configured for output
4792 if ( device < renderDeviceCount && mode == OUTPUT )
4794 // if renderAudioClient is already initialised, don't initialise it again
// Happens when the loopback branch above already opened this endpoint.
4795 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4796 if ( renderAudioClient )
4798 methodResult = SUCCESS;
4802 hr = renderDevices->Item( device, &devicePtr );
4803 if ( FAILED( hr ) ) {
4804 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4808 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4809 NULL, ( void** ) &renderAudioClient );
4810 if ( FAILED( hr ) ) {
4811 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";
4815 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4816 if ( FAILED( hr ) ) {
4817 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";
4821 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4822 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// Opening the second direction of an already-open stream promotes it to DUPLEX.
4826 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4827 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4828 stream_.mode = DUPLEX;
4831 stream_.mode = mode;
// Fill in the RtApi stream bookkeeping for this direction.
4834 stream_.device[mode] = device;
4835 stream_.doByteSwap[mode] = false;
4836 stream_.sampleRate = sampleRate;
4837 stream_.bufferSize = *bufferSize;
4838 stream_.nBuffers = 1;
4839 stream_.nUserChannels[mode] = channels;
4840 stream_.channelOffset[mode] = firstChannel;
4841 stream_.userFormat = format;
4842 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
4844 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4845 stream_.userInterleaved = false;
4847 stream_.userInterleaved = true;
4848 stream_.deviceInterleaved[mode] = true;
4850 // Set flags for buffer conversion.
// Conversion is needed whenever the user format/channel count differs from
// the device's, or a non-interleaved user layout must be interleaved.
4851 stream_.doConvertBuffer[mode] = false;
4852 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4853 stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
4854 stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
4855 stream_.doConvertBuffer[mode] = true;
4856 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4857 stream_.nUserChannels[mode] > 1 )
4858 stream_.doConvertBuffer[mode] = true;
4860 if ( stream_.doConvertBuffer[mode] )
4861 setConvertInfo( mode, 0 );
4863 // Allocate necessary internal buffers
4864 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4866 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4867 if ( !stream_.userBuffer[mode] ) {
4868 errorType = RtAudioError::MEMORY_ERROR;
4869 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4873 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4874 stream_.callbackInfo.priority = 15;
4876 stream_.callbackInfo.priority = 0;
4878 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4879 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4881 methodResult = SUCCESS;
// Shared cleanup target for all error paths above.
4885 SAFE_RELEASE( captureDevices );
4886 SAFE_RELEASE( renderDevices );
4887 SAFE_RELEASE( devicePtr );
4888 CoTaskMemFree( deviceFormat );
4890 // if method failed, close the stream
4891 if ( methodResult == FAILURE )
4894 if ( !errorText_.empty() )
4896 return methodResult;
4899 //=============================================================================
4901 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4904 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4909 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4912 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4917 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4920 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4925 //-----------------------------------------------------------------------------
// Core WASAPI processing thread. Runs once per open stream: it COM-initializes
// itself, finishes lazy device setup (capture/render audio clients, event
// handles, resamplers, ring buffers), then loops -- pulling captured frames,
// invoking the user callback, and pushing rendered frames -- until
// stream_.state becomes STREAM_STOPPING. Errors are collected locally in
// errorText/errorType and reported after the loop exits.
4927 void RtApiWasapi::wasapiThread()
4929   // as this is a new thread, we must CoInitialize it
4930   CoInitialize( NULL );
4934   IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4935   IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4936   IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4937   IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4938   HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4939   HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4941   WAVEFORMATEX* captureFormat = NULL;
4942   WAVEFORMATEX* renderFormat = NULL;
4943   float captureSrRatio = 0.0f;
4944   float renderSrRatio = 0.0f;
4945   WasapiBuffer captureBuffer;
4946   WasapiBuffer renderBuffer;
4947   WasapiResampler* captureResampler = NULL;
4948   WasapiResampler* renderResampler = NULL;
4950   // declare local stream variables
4951   RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4952   BYTE* streamBuffer = NULL;
4953   unsigned long captureFlags = 0;
4954   unsigned int bufferFrameCount = 0;
4955   unsigned int numFramesPadding = 0;
4956   unsigned int convBufferSize = 0;
  // Loopback capture is selected when input and output refer to the same
  // device index (used below to pick AUDCLNT_STREAMFLAGS_LOOPBACK).
4957   bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4958   bool callbackPushed = true;
4959   bool callbackPulled = false;
4960   bool callbackStopped = false;
4961   int callbackResult = 0;
4963   // convBuffer is used to store converted buffers between WASAPI and the user
4964   char* convBuffer = NULL;
4965   unsigned int convBuffSize = 0;
4966   unsigned int deviceBuffSize = 0;
4968   std::string errorText;
4969   RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4971   // Attempt to assign "Pro Audio" characteristic to thread
  // NOTE(review): assumes AVRT.dll and the AvSetMmThreadCharacteristicsW
  // entry point resolve successfully -- confirm the (elided) null guards
  // around these calls.
4972   HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4974   DWORD taskIndex = 0;
4975   TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4976   ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4977   AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4978   FreeLibrary( AvrtDll );
4981   // start capture stream if applicable
4982   if ( captureAudioClient ) {
4983   hr = captureAudioClient->GetMixFormat( &captureFormat );
4984   if ( FAILED( hr ) ) {
4985   errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4989   // init captureResampler
4990   captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4991   formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4992   captureFormat->nSamplesPerSec, stream_.sampleRate );
4994   captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4996   if ( !captureClient ) {
4997   hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4998   loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5003   if ( FAILED( hr ) ) {
5004   errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5008   hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5009   ( void** ) &captureClient );
5010   if ( FAILED( hr ) ) {
5011   errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5015   // don't configure captureEvent if in loopback mode
5016   if ( !loopbackEnabled )
5018   // configure captureEvent to trigger on every available capture buffer
5019   captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5020   if ( !captureEvent ) {
5021   errorType = RtAudioError::SYSTEM_ERROR;
5022   errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5026   hr = captureAudioClient->SetEventHandle( captureEvent );
5027   if ( FAILED( hr ) ) {
5028   errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
  // Cache the lazily-created COM objects/handles back on the shared
  // WasapiHandle so other methods (and the next pass) can reuse them.
5032   ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5035   ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5037   // reset the capture stream
5038   hr = captureAudioClient->Reset();
5039   if ( FAILED( hr ) ) {
5040   errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5044   // start the capture stream
5045   hr = captureAudioClient->Start();
5046   if ( FAILED( hr ) ) {
5047   errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5052   unsigned int inBufferSize = 0;
5053   hr = captureAudioClient->GetBufferSize( &inBufferSize );
5054   if ( FAILED( hr ) ) {
5055   errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5059   // scale outBufferSize according to stream->user sample rate ratio
5060   unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5061   inBufferSize *= stream_.nDeviceChannels[INPUT];
5063   // set captureBuffer size
5064   captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5067   // start render stream if applicable
5068   if ( renderAudioClient ) {
5069   hr = renderAudioClient->GetMixFormat( &renderFormat );
5070   if ( FAILED( hr ) ) {
5071   errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5075   // init renderResampler
5076   renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5077   formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5078   stream_.sampleRate, renderFormat->nSamplesPerSec );
5080   renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5082   if ( !renderClient ) {
5083   hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5084   AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5089   if ( FAILED( hr ) ) {
5090   errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5094   hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5095   ( void** ) &renderClient );
5096   if ( FAILED( hr ) ) {
5097   errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5101   // configure renderEvent to trigger on every available render buffer
5102   renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5103   if ( !renderEvent ) {
5104   errorType = RtAudioError::SYSTEM_ERROR;
5105   errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5109   hr = renderAudioClient->SetEventHandle( renderEvent );
5110   if ( FAILED( hr ) ) {
5111   errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5115   ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5116   ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5118   // reset the render stream
5119   hr = renderAudioClient->Reset();
5120   if ( FAILED( hr ) ) {
5121   errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5125   // start the render stream
5126   hr = renderAudioClient->Start();
5127   if ( FAILED( hr ) ) {
5128   errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5133   unsigned int outBufferSize = 0;
5134   hr = renderAudioClient->GetBufferSize( &outBufferSize );
5135   if ( FAILED( hr ) ) {
5136   errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5140   // scale inBufferSize according to user->stream sample rate ratio
5141   unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5142   outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5144   // set renderBuffer size
5145   renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5148   // malloc buffer memory
  // Size the conversion/device staging buffers for whichever directions
  // are active; DUPLEX takes the max of the two so one buffer serves both.
5149   if ( stream_.mode == INPUT )
5151   using namespace std; // for ceilf
5152   convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5153   deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5155   else if ( stream_.mode == OUTPUT )
5157   convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5158   deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5160   else if ( stream_.mode == DUPLEX )
5162   convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5163   ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5164   deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5165   stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5168   convBuffSize *= 2; // allow overflow for *SrRatio remainders
5169   convBuffer = ( char* ) calloc( convBuffSize, 1 );
5170   stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5171   if ( !convBuffer || !stream_.deviceBuffer ) {
5172   errorType = RtAudioError::MEMORY_ERROR;
5173   errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5177   // stream process loop
  // Main pump: each pass (1) pulls/resamples capture data, (2) runs the
  // user callback, (3) pushes/resamples render data, (4) services the
  // WASAPI capture and render endpoints. Loop exits when stopStream() /
  // abortStream() moves the state to STREAM_STOPPING.
5178   while ( stream_.state != STREAM_STOPPING ) {
5179   if ( !callbackPulled ) {
5182   // 1. Pull callback buffer from inputBuffer
5183   // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5184   // Convert callback buffer to user format
5186   if ( captureAudioClient )
5188   int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5189   if ( captureSrRatio != 1 )
5191   // account for remainders
5196   while ( convBufferSize < stream_.bufferSize )
5198   // Pull callback buffer from inputBuffer
5199   callbackPulled = captureBuffer.pullBuffer( convBuffer,
5200   samplesToPull * stream_.nDeviceChannels[INPUT],
5201   stream_.deviceFormat[INPUT] );
5203   if ( !callbackPulled )
5208   // Convert callback buffer to user sample rate
5209   unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5210   unsigned int convSamples = 0;
5212   captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5217   convBufferSize += convSamples;
5218   samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5221   if ( callbackPulled )
5223   if ( stream_.doConvertBuffer[INPUT] ) {
5224   // Convert callback buffer to user format
5225   convertBuffer( stream_.userBuffer[INPUT],
5226   stream_.deviceBuffer,
5227   stream_.convertInfo[INPUT] );
5230   // no further conversion, simple copy deviceBuffer to userBuffer
5231   memcpy( stream_.userBuffer[INPUT],
5232   stream_.deviceBuffer,
5233   stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5238   // if there is no capture stream, set callbackPulled flag
5239   callbackPulled = true;
5244   // 1. Execute user callback method
5245   // 2. Handle return value from callback
5247   // if callback has not requested the stream to stop
5248   if ( callbackPulled && !callbackStopped ) {
5249   // Execute user callback method
5250   callbackResult = callback( stream_.userBuffer[OUTPUT],
5251   stream_.userBuffer[INPUT],
5254   captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5255   stream_.callbackInfo.userData );
5258   RtApi::tickStreamTime();
5260   // Handle return value from callback
  // callbackResult 1 => drain then stop; 2 => abort immediately. A helper
  // thread is spawned for the stop/abort -- presumably because
  // stopStream()/abortStream() cannot run on this (the audio) thread
  // itself; confirm against stopStream()'s implementation.
5261   if ( callbackResult == 1 ) {
5262   // instantiate a thread to stop this thread
5263   HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5264   if ( !threadHandle ) {
5265   errorType = RtAudioError::THREAD_ERROR;
5266   errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5269   else if ( !CloseHandle( threadHandle ) ) {
5270   errorType = RtAudioError::THREAD_ERROR;
5271   errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5275   callbackStopped = true;
5277   else if ( callbackResult == 2 ) {
5278   // instantiate a thread to stop this thread
5279   HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5280   if ( !threadHandle ) {
5281   errorType = RtAudioError::THREAD_ERROR;
5282   errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5285   else if ( !CloseHandle( threadHandle ) ) {
5286   errorType = RtAudioError::THREAD_ERROR;
5287   errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5291   callbackStopped = true;
5298   // 1. Convert callback buffer to stream format
5299   // 2. Convert callback buffer to stream sample rate and channel count
5300   // 3. Push callback buffer into outputBuffer
5302   if ( renderAudioClient && callbackPulled )
5304   // if the last call to renderBuffer.PushBuffer() was successful
5305   if ( callbackPushed || convBufferSize == 0 )
5307   if ( stream_.doConvertBuffer[OUTPUT] )
5309   // Convert callback buffer to stream format
5310   convertBuffer( stream_.deviceBuffer,
5311   stream_.userBuffer[OUTPUT],
5312   stream_.convertInfo[OUTPUT] );
5316   // no further conversion, simple copy userBuffer to deviceBuffer
5317   memcpy( stream_.deviceBuffer,
5318   stream_.userBuffer[OUTPUT],
5319   stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5322   // Convert callback buffer to stream sample rate
5323   renderResampler->Convert( convBuffer,
5324   stream_.deviceBuffer,
5329   // Push callback buffer into outputBuffer
5330   callbackPushed = renderBuffer.pushBuffer( convBuffer,
5331   convBufferSize * stream_.nDeviceChannels[OUTPUT],
5332   stream_.deviceFormat[OUTPUT] );
5335   // if there is no render stream, set callbackPushed flag
5336   callbackPushed = true;
5341   // 1. Get capture buffer from stream
5342   // 2. Push capture buffer into inputBuffer
5343   // 3. If 2. was successful: Release capture buffer
5345   if ( captureAudioClient ) {
5346   // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
  // In loopback mode we wait on the render event because loopback capture
  // generates no capture events of its own.
5347   if ( !callbackPulled ) {
5348   WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5351   // Get capture buffer from stream
5352   hr = captureClient->GetBuffer( &streamBuffer,
5354   &captureFlags, NULL, NULL );
5355   if ( FAILED( hr ) ) {
5356   errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5360   if ( bufferFrameCount != 0 ) {
5361   // Push capture buffer into inputBuffer
5362   if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5363   bufferFrameCount * stream_.nDeviceChannels[INPUT],
5364   stream_.deviceFormat[INPUT] ) )
5366   // Release capture buffer
5367   hr = captureClient->ReleaseBuffer( bufferFrameCount );
5368   if ( FAILED( hr ) ) {
5369   errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5375   // Inform WASAPI that capture was unsuccessful
5376   hr = captureClient->ReleaseBuffer( 0 );
5377   if ( FAILED( hr ) ) {
5378   errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5385   // Inform WASAPI that capture was unsuccessful
5386   hr = captureClient->ReleaseBuffer( 0 );
5387   if ( FAILED( hr ) ) {
5388   errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5396   // 1. Get render buffer from stream
5397   // 2. Pull next buffer from outputBuffer
5398   // 3. If 2. was successful: Fill render buffer with next buffer
5399   // Release render buffer
5401   if ( renderAudioClient ) {
5402   // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5403   if ( callbackPulled && !callbackPushed ) {
5404   WaitForSingleObject( renderEvent, INFINITE );
5407   // Get render buffer from stream
5408   hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5409   if ( FAILED( hr ) ) {
5410   errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5414   hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5415   if ( FAILED( hr ) ) {
5416   errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
  // Only the unpadded portion of the endpoint buffer may be written.
5420   bufferFrameCount -= numFramesPadding;
5422   if ( bufferFrameCount != 0 ) {
5423   hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5424   if ( FAILED( hr ) ) {
5425   errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5429   // Pull next buffer from outputBuffer
5430   // Fill render buffer with next buffer
5431   if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5432   bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5433   stream_.deviceFormat[OUTPUT] ) )
5435   // Release render buffer
5436   hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5437   if ( FAILED( hr ) ) {
5438   errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5444   // Inform WASAPI that render was unsuccessful
5445   hr = renderClient->ReleaseBuffer( 0, 0 );
5446   if ( FAILED( hr ) ) {
5447   errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5454   // Inform WASAPI that render was unsuccessful
5455   hr = renderClient->ReleaseBuffer( 0, 0 );
5456   if ( FAILED( hr ) ) {
5457   errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5463   // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5464   if ( callbackPushed ) {
5465   // unsetting the callbackPulled flag lets the stream know that
5466   // the audio device is ready for another callback output buffer.
5467   callbackPulled = false;
  // Exit path: free the COM-allocated mix formats, the conversion buffer
  // and both resamplers, then publish the stopped state and any error.
5474   CoTaskMemFree( captureFormat );
5475   CoTaskMemFree( renderFormat );
5477   free ( convBuffer );
5478   delete renderResampler;
5479   delete captureResampler;
5483   // update stream state
5484   stream_.state = STREAM_STOPPED;
5486   if ( !errorText.empty() )
5488   errorText_ = errorText;
5493 //******************** End of __WINDOWS_WASAPI__ *********************//
5497 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5499 // Modified by Robin Davies, October 2005
5500 // - Improvements to DirectX pointer chasing.
5501 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5502 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5503 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5504 // Changed device query structure for RtAudio 4.0.7, January 2010
5506 #include <windows.h>
5507 #include <process.h>
5508 #include <mmsystem.h>
5512 #include <algorithm>
5514 #if defined(__MINGW32__)
5515 // missing from latest mingw winapi
5516 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5517 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5518 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5519 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5522 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5524 #ifdef _MSC_VER // if Microsoft Visual C++
5525 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5528 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5530 if ( pointer > bufferSize ) pointer -= bufferSize;
5531 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5532 if ( pointer < earlierPointer ) pointer += bufferSize;
5533 return pointer >= earlierPointer && pointer < laterPointer;
5536 // A structure to hold various information related to the DirectSound
5537 // API implementation.
// Index convention for the [2] arrays: 0 = output/playback, 1 = input/capture
// (matches validId[0]/validId[1] usage in the device-probing code below).
5539   unsigned int drainCounter; // Tracks callback counts when draining
5540   bool internalDrain;        // Indicates if stop is initiated from callback or not.
5544   UINT bufferPointer[2];  // current read/write offset into each DS buffer -- presumably bytes; confirm
5545   DWORD dsBufferSize[2];  // size of each DirectSound buffer -- presumably bytes; confirm
5546   DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default-construct with counters zeroed and all per-direction slots cleared.
5550     :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5553 // Declarations for utility functions, callbacks, and structures
5554 // specific to the DirectSound implementation.
5555 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5556 LPCTSTR description,
5560 static const char* getErrorString( int code );
5562 static unsigned __stdcall callbackHandler( void *ptr );
5571 : found(false) { validId[0] = false; validId[1] = false; }
5574 struct DsProbeData {
5576 std::vector<struct DsDevice>* dsDevices;
5579 RtApiDs :: RtApiDs()
5581 // Dsound will run both-threaded. If CoInitialize fails, then just
5582 // accept whatever the mainline chose for a threading model.
5583 coInitialized_ = false;
5584 HRESULT hr = CoInitialize( NULL );
5585 if ( !FAILED( hr ) ) coInitialized_ = true;
5588 RtApiDs :: ~RtApiDs()
5590 if ( stream_.state != STREAM_CLOSED ) closeStream();
5591 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5594 // The DirectSound default output is always the first device.
5595 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5600 // The DirectSound default input is always the first input device,
5601 // which is the first capture device enumerated.
5602 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerate DirectSound output and capture devices into the cached
// dsDevices list (pruning entries that have disappeared) and return the
// number of devices currently present. Enumeration failures are reported
// as warnings and enumeration continues.
5607 unsigned int RtApiDs :: getDeviceCount( void )
5609   // Set query flag for previously found devices to false, so that we
5610   // can check for any devices that have disappeared.
5611   for ( unsigned int i=0; i<dsDevices.size(); i++ )
5612     dsDevices[i].found = false;
5614   // Query DirectSound devices.
5615   struct DsProbeData probeInfo;
5616   probeInfo.isInput = false;
5617   probeInfo.dsDevices = &dsDevices;
5618   HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5619   if ( FAILED( result ) ) {
5620   errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5621   errorText_ = errorStream_.str();
5622   error( RtAudioError::WARNING );
5625   // Query DirectSoundCapture devices.
5626   probeInfo.isInput = true;
5627   result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5628   if ( FAILED( result ) ) {
5629   errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5630   errorText_ = errorStream_.str();
5631   error( RtAudioError::WARNING );
5634   // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// NOTE: the index is advanced only when no element is erased (increment is
// inside the loop body, not the for-header).
5635   for ( unsigned int i=0; i<dsDevices.size(); ) {
5636   if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5640   return static_cast<unsigned int>(dsDevices.size());
// Probe one DirectSound device and fill an RtAudio::DeviceInfo for it:
// opens the output side (DirectSoundCreate/GetCaps) for channel, sample-rate
// and format capabilities, then the capture side (DirectSoundCaptureCreate/
// GetCaps), merges the supported input rates into the list without
// duplicates, and copies the device name. Probe failures are reported as
// warnings; invalid device indices raise INVALID_USE.
5643 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5645   RtAudio::DeviceInfo info;
5646   info.probed = false;
5648   if ( dsDevices.size() == 0 ) {
5649   // Force a query of all devices
5651   if ( dsDevices.size() == 0 ) {
5652   errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5653   error( RtAudioError::INVALID_USE );
5658   if ( device >= dsDevices.size() ) {
5659   errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5660   error( RtAudioError::INVALID_USE );
// No valid output id means this device is capture-only: skip straight to
// the input-probing section.
5665   if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5667   LPDIRECTSOUND output;
5669   result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5670   if ( FAILED( result ) ) {
5671   errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5672   errorText_ = errorStream_.str();
5673   error( RtAudioError::WARNING );
5677   outCaps.dwSize = sizeof( outCaps );
5678   result = output->GetCaps( &outCaps );
5679   if ( FAILED( result ) ) {
5681   errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5682   errorText_ = errorStream_.str();
5683   error( RtAudioError::WARNING );
5687   // Get output channel information.
5688   info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5690   // Get sample rate information.
5691   info.sampleRates.clear();
5692   for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5693   if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5694   SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5695   info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported rate not exceeding 48 kHz.
5697   if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5698   info.preferredSampleRate = SAMPLE_RATES[k];
5702   // Get format information.
5703   if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5704   if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5708   if ( getDefaultOutputDevice() == device )
5709   info.isDefaultOutput = true;
// Output-only device: nothing to probe on the capture side, so finish here.
5711   if ( dsDevices[ device ].validId[1] == false ) {
5712   info.name = dsDevices[ device ].name;
5719   LPDIRECTSOUNDCAPTURE input;
5720   result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5721   if ( FAILED( result ) ) {
5722   errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5723   errorText_ = errorStream_.str();
5724   error( RtAudioError::WARNING );
5729   inCaps.dwSize = sizeof( inCaps );
5730   result = input->GetCaps( &inCaps );
5731   if ( FAILED( result ) ) {
5733   errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5734   errorText_ = errorStream_.str();
5735   error( RtAudioError::WARNING );
5739   // Get input channel information.
5740   info.inputChannels = inCaps.dwChannels;
5742   // Get sample rate and format information.
// The WAVE_FORMAT_* capability bits encode rate/channels/width combinations:
// stereo (xS16/xS08) flags are checked for >= 2 channels, mono (xM16/xM08)
// flags for single-channel devices.
5743   std::vector<unsigned int> rates;
5744   if ( inCaps.dwChannels >= 2 ) {
5745   if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5746   if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5747   if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5748   if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5749   if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5750   if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5751   if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5752   if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5754   if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5755   if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5756   if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5757   if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5758   if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5760   else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5761   if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5762   if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5763   if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5764   if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5767   else if ( inCaps.dwChannels == 1 ) {
5768   if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5769   if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5770   if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5771   if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5772   if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5773   if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5774   if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5775   if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5777   if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5778   if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5779   if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5780   if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5781   if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5783   else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5784   if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5785   if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5786   if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5787   if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5790   else info.inputChannels = 0; // technically, this would be an error
5794   if ( info.inputChannels == 0 ) return info;
5796   // Copy the supported rates to the info structure but avoid duplication.
5798   for ( unsigned int i=0; i<rates.size(); i++ ) {
5800   for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5801   if ( rates[i] == info.sampleRates[j] ) {
5806   if ( found == false ) info.sampleRates.push_back( rates[i] );
5808   std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5810   // If device opens for both playback and capture, we determine the channels.
5811   if ( info.outputChannels > 0 && info.inputChannels > 0 )
5812   info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5814   if ( device == 0 ) info.isDefaultInput = true;
5816   // Copy name and return.
5817   info.name = dsDevices[ device ].name;
// Open the given DirectSound device for the requested mode (OUTPUT or
// INPUT), channel count/offset, sample rate and sample format.  Creates
// the DirectSound playback or capture buffer, allocates the user/device
// conversion buffers and the shared DsHandle, and spawns the callback
// thread on first open.  On failure, errorText_ is set before returning.
// Note: DirectSound supports at most 2 channels per device here.
5822 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5823 unsigned int firstChannel, unsigned int sampleRate,
5824 RtAudioFormat format, unsigned int *bufferSize,
5825 RtAudio::StreamOptions *options )
5827 if ( channels + firstChannel > 2 ) {
5828 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5832 size_t nDevices = dsDevices.size();
5833 if ( nDevices == 0 ) {
5834 // This should not happen because a check is made before this function is called.
5835 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5839 if ( device >= nDevices ) {
5840 // This should not happen because a check is made before this function is called.
5841 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// Verify the device actually supports the requested direction
// (validId[0] = output capable, validId[1] = input capable).
5845 if ( mode == OUTPUT ) {
5846 if ( dsDevices[ device ].validId[0] == false ) {
5847 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5848 errorText_ = errorStream_.str();
5852 else { // mode == INPUT
5853 if ( dsDevices[ device ].validId[1] == false ) {
5854 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5855 errorText_ = errorStream_.str();
5860 // According to a note in PortAudio, using GetDesktopWindow()
5861 // instead of GetForegroundWindow() is supposed to avoid problems
5862 // that occur when the application's window is not the foreground
5863 // window. Also, if the application window closes before the
5864 // DirectSound buffer, DirectSound can crash. In the past, I had
5865 // problems when using GetDesktopWindow() but it seems fine now
5866 // (January 2010). I'll leave it commented here.
5867 // HWND hWnd = GetForegroundWindow();
5868 HWND hWnd = GetDesktopWindow();
5870 // Check the numberOfBuffers parameter and limit the lowest value to
5871 // two. This is a judgement call and a value of two is probably too
5872 // low for capture, but it should work for playback.
5874 if ( options ) nBuffers = options->numberOfBuffers;
5875 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5876 if ( nBuffers < 2 ) nBuffers = 3;
5878 // Check the lower range of the user-specified buffer size and set
5879 // (arbitrarily) to a lower bound of 32.
5880 if ( *bufferSize < 32 ) *bufferSize = 32;
5882 // Create the wave format structure. The data format setting will
5883 // be determined later.
5884 WAVEFORMATEX waveFormat;
5885 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5886 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5887 waveFormat.nChannels = channels + firstChannel;
5888 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5890 // Determine the device buffer size. By default, we'll use the value
5891 // defined above (32K), but we will grow it to make allowances for
5892 // very large software buffer sizes.
5893 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5894 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle carry the device object and buffer pointers into the
// shared DsHandle after mode-specific setup below.
5896 void *ohandle = 0, *bhandle = 0;
5898 if ( mode == OUTPUT ) {
5900 LPDIRECTSOUND output;
5901 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5902 if ( FAILED( result ) ) {
5903 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5904 errorText_ = errorStream_.str();
5909 outCaps.dwSize = sizeof( outCaps );
5910 result = output->GetCaps( &outCaps );
5911 if ( FAILED( result ) ) {
5913 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5914 errorText_ = errorStream_.str();
5918 // Check channel information.
5919 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5920 errorStream_ << "RtApiDs::probeDeviceOpen: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5921 errorText_ = errorStream_.str();
5925 // Check format information. Use 16-bit format unless not
5926 // supported or user requests 8-bit.
5927 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5928 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5929 waveFormat.wBitsPerSample = 16;
5930 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5933 waveFormat.wBitsPerSample = 8;
5934 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5936 stream_.userFormat = format;
5938 // Update wave format structure and buffer information.
5939 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5940 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5941 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5943 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5944 while ( dsPointerLeadTime * 2U > dsBufferSize )
5947 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5948 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5949 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5950 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5951 if ( FAILED( result ) ) {
5953 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5954 errorText_ = errorStream_.str();
5958 // Even though we will write to the secondary buffer, we need to
5959 // access the primary buffer to set the correct output format
5960 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5961 // buffer description.
5962 DSBUFFERDESC bufferDescription;
5963 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5964 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5965 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5967 // Obtain the primary buffer
5968 LPDIRECTSOUNDBUFFER buffer;
5969 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5970 if ( FAILED( result ) ) {
5972 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5973 errorText_ = errorStream_.str();
5977 // Set the primary DS buffer sound format.
5978 result = buffer->SetFormat( &waveFormat );
5979 if ( FAILED( result ) ) {
5981 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5982 errorText_ = errorStream_.str();
5986 // Setup the secondary DS buffer description.
5987 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5988 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5989 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5990 DSBCAPS_GLOBALFOCUS |
5991 DSBCAPS_GETCURRENTPOSITION2 |
5992 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5993 bufferDescription.dwBufferBytes = dsBufferSize;
5994 bufferDescription.lpwfxFormat = &waveFormat;
5996 // Try to create the secondary DS buffer. If that doesn't work,
5997 // try to use software mixing. Otherwise, there's a problem.
5998 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5999 if ( FAILED( result ) ) {
6000 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6001 DSBCAPS_GLOBALFOCUS |
6002 DSBCAPS_GETCURRENTPOSITION2 |
6003 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6004 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6005 if ( FAILED( result ) ) {
6007 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6008 errorText_ = errorStream_.str();
6013 // Get the buffer size ... might be different from what we specified.
6015 dsbcaps.dwSize = sizeof( DSBCAPS );
6016 result = buffer->GetCaps( &dsbcaps );
6017 if ( FAILED( result ) ) {
6020 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6021 errorText_ = errorStream_.str();
6025 dsBufferSize = dsbcaps.dwBufferBytes;
6027 // Lock the DS buffer
6030 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6031 if ( FAILED( result ) ) {
6034 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6035 errorText_ = errorStream_.str();
6039 // Zero the DS buffer
6040 ZeroMemory( audioPtr, dataLen );
6042 // Unlock the DS buffer
6043 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6044 if ( FAILED( result ) ) {
6047 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6048 errorText_ = errorStream_.str();
6052 ohandle = (void *) output;
6053 bhandle = (void *) buffer;
6056 if ( mode == INPUT ) {
6058 LPDIRECTSOUNDCAPTURE input;
6059 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6060 if ( FAILED( result ) ) {
6061 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6062 errorText_ = errorStream_.str();
6067 inCaps.dwSize = sizeof( inCaps );
6068 result = input->GetCaps( &inCaps );
6069 if ( FAILED( result ) ) {
6071 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6072 errorText_ = errorStream_.str();
6076 // Check channel information.
6077 if ( inCaps.dwChannels < channels + firstChannel ) {
6078 errorText_ = "RtApiDs::probeDeviceOpen: the input device does not support requested input channels.";
6082 // Check format information. Use 16-bit format unless user
6084 DWORD deviceFormats;
6085 if ( channels + firstChannel == 2 ) {
6086 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6087 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6088 waveFormat.wBitsPerSample = 8;
6089 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6091 else { // assume 16-bit is supported
6092 waveFormat.wBitsPerSample = 16;
6093 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6096 else { // channel == 1
6097 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6098 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6099 waveFormat.wBitsPerSample = 8;
6100 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6102 else { // assume 16-bit is supported
6103 waveFormat.wBitsPerSample = 16;
6104 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6107 stream_.userFormat = format;
6109 // Update wave format structure and buffer information.
6110 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6111 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6112 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6114 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6115 while ( dsPointerLeadTime * 2U > dsBufferSize )
6118 // Setup the secondary DS buffer description.
6119 DSCBUFFERDESC bufferDescription;
6120 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6121 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6122 bufferDescription.dwFlags = 0;
6123 bufferDescription.dwReserved = 0;
6124 bufferDescription.dwBufferBytes = dsBufferSize;
6125 bufferDescription.lpwfxFormat = &waveFormat;
6127 // Create the capture buffer.
6128 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6129 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6130 if ( FAILED( result ) ) {
6132 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6133 errorText_ = errorStream_.str();
6137 // Get the buffer size ... might be different from what we specified.
6139 dscbcaps.dwSize = sizeof( DSCBCAPS );
6140 result = buffer->GetCaps( &dscbcaps );
6141 if ( FAILED( result ) ) {
6144 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6145 errorText_ = errorStream_.str();
6149 dsBufferSize = dscbcaps.dwBufferBytes;
6151 // NOTE: We could have a problem here if this is a duplex stream
6152 // and the play and capture hardware buffer sizes are different
6153 // (I'm actually not sure if that is a problem or not).
6154 // Currently, we are not verifying that.
6156 // Lock the capture buffer
6159 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6160 if ( FAILED( result ) ) {
6163 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6164 errorText_ = errorStream_.str();
6169 ZeroMemory( audioPtr, dataLen );
6171 // Unlock the buffer
6172 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6173 if ( FAILED( result ) ) {
6176 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6177 errorText_ = errorStream_.str();
6181 ohandle = (void *) input;
6182 bhandle = (void *) buffer;
6185 // Set various stream parameters
6186 DsHandle *handle = 0;
6187 stream_.nDeviceChannels[mode] = channels + firstChannel;
6188 stream_.nUserChannels[mode] = channels;
6189 stream_.bufferSize = *bufferSize;
6190 stream_.channelOffset[mode] = firstChannel;
6191 stream_.deviceInterleaved[mode] = true;
6192 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6193 else stream_.userInterleaved = true;
6195 // Set flag for buffer conversion
6196 stream_.doConvertBuffer[mode] = false;
6197 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6198 stream_.doConvertBuffer[mode] = true;
6199 if (stream_.userFormat != stream_.deviceFormat[mode])
6200 stream_.doConvertBuffer[mode] = true;
6201 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6202 stream_.nUserChannels[mode] > 1 )
6203 stream_.doConvertBuffer[mode] = true;
6205 // Allocate necessary internal buffers
6206 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6207 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6208 if ( stream_.userBuffer[mode] == NULL ) {
6209 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
// A separate device buffer is only needed when conversion is required;
// for a duplex stream, an existing output-side buffer is reused if it
// is already large enough.
6213 if ( stream_.doConvertBuffer[mode] ) {
6215 bool makeBuffer = true;
6216 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6217 if ( mode == INPUT ) {
6218 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6219 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6220 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6225 bufferBytes *= *bufferSize;
6226 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6227 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6228 if ( stream_.deviceBuffer == NULL ) {
6229 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6235 // Allocate our DsHandle structures for the stream.
6236 if ( stream_.apiHandle == 0 ) {
6238 handle = new DsHandle;
6240 catch ( std::bad_alloc& ) {
6241 errorText_ = "RtApiDs::probeDeviceOpen: error allocating DsHandle memory.";
6245 // Create a manual-reset event.
6246 handle->condition = CreateEvent( NULL, // no security
6247 TRUE, // manual-reset
6248 FALSE, // non-signaled initially
6250 stream_.apiHandle = (void *) handle;
// Record this mode's device object, buffer, size and lead time in the
// (possibly shared) DsHandle.
6253 handle = (DsHandle *) stream_.apiHandle;
6254 handle->id[mode] = ohandle;
6255 handle->buffer[mode] = bhandle;
6256 handle->dsBufferSize[mode] = dsBufferSize;
6257 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6259 stream_.device[mode] = device;
6260 stream_.state = STREAM_STOPPED;
6261 if ( stream_.mode == OUTPUT && mode == INPUT )
6262 // We had already set up an output stream.
6263 stream_.mode = DUPLEX;
6265 stream_.mode = mode;
6266 stream_.nBuffers = nBuffers;
6267 stream_.sampleRate = sampleRate;
6269 // Setup the buffer conversion information structure.
6270 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6272 // Setup the callback thread.
6273 if ( stream_.callbackInfo.isRunning == false ) {
6275 stream_.callbackInfo.isRunning = true;
6276 stream_.callbackInfo.object = (void *) this;
6277 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6278 &stream_.callbackInfo, 0, &threadId );
6279 if ( stream_.callbackInfo.thread == 0 ) {
6280 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6284 // Boost DS thread priority
6285 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// Error cleanup: release any DirectSound objects already created,
// close the condition event, and free all allocated buffers.
6291 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6292 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6293 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6294 if ( buffer ) buffer->Release();
6297 if ( handle->buffer[1] ) {
6298 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6299 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6300 if ( buffer ) buffer->Release();
6303 CloseHandle( handle->condition );
6305 stream_.apiHandle = 0;
6308 for ( int i=0; i<2; i++ ) {
6309 if ( stream_.userBuffer[i] ) {
6310 free( stream_.userBuffer[i] );
6311 stream_.userBuffer[i] = 0;
6315 if ( stream_.deviceBuffer ) {
6316 free( stream_.deviceBuffer );
6317 stream_.deviceBuffer = 0;
6320 stream_.state = STREAM_CLOSED;
// Close an open stream: shut down the callback thread, release the
// DirectSound playback/capture buffers and device objects, free the
// DsHandle and internal buffers, and mark the stream CLOSED.  Warns
// (does not throw) if no stream is open.
6324 void RtApiDs :: closeStream()
6326 if ( stream_.state == STREAM_CLOSED ) {
6327 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6328 error( RtAudioError::WARNING );
// Signal the callback thread to exit, wait for it, then reclaim its handle.
6332 // Stop the callback thread.
6333 stream_.callbackInfo.isRunning = false;
6334 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6335 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6337 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Tear down the output-side DirectSound objects, if present.
6339 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6340 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6341 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Tear down the input-side DirectSound objects, if present.
6348 if ( handle->buffer[1] ) {
6349 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6350 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Close the manual-reset event created in probeDeviceOpen and drop the handle.
6357 CloseHandle( handle->condition );
6359 stream_.apiHandle = 0;
// Free the user buffers for both directions and the shared device buffer.
6362 for ( int i=0; i<2; i++ ) {
6363 if ( stream_.userBuffer[i] ) {
6364 free( stream_.userBuffer[i] );
6365 stream_.userBuffer[i] = 0;
6369 if ( stream_.deviceBuffer ) {
6370 free( stream_.deviceBuffer );
6371 stream_.deviceBuffer = 0;
6374 stream_.mode = UNINITIALIZED;
6375 stream_.state = STREAM_CLOSED;
// Start an open stream: begin looping playback on the output buffer
// and/or looping capture on the input buffer, reset the drain state, and
// mark the stream RUNNING.  Warns (does not throw) if already running.
6378 void RtApiDs :: startStream()
6381 RtApi::startStream();
6382 if ( stream_.state == STREAM_RUNNING ) {
6383 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6384 error( RtAudioError::WARNING );
// Record the start time used as the basis for stream-time calculations.
6388 #if defined( HAVE_GETTIMEOFDAY )
6389 gettimeofday( &stream_.lastTickTimestamp, NULL );
6392 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6394 // Increase scheduler frequency on lesser windows (a side-effect of
6395 // increasing timer accuracy). On greater windows (Win2K or later),
6396 // this is already in effect.
6397 timeBeginPeriod( 1 );
// Reset duplex-synchronization state; callbackEvent re-syncs the buffer
// pointers once both devices are rolling.
6399 buffersRolling = false;
6400 duplexPrerollBytes = 0;
6402 if ( stream_.mode == DUPLEX ) {
6403 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6404 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looping playback on the secondary output buffer.
6408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6410 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6411 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6412 if ( FAILED( result ) ) {
6413 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6414 errorText_ = errorStream_.str();
// Start looping capture on the input buffer.
6419 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6421 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6422 result = buffer->Start( DSCBSTART_LOOPING );
6423 if ( FAILED( result ) ) {
6424 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6425 errorText_ = errorStream_.str();
// Clear drain state and the stop-signal event, then mark running.
6430 handle->drainCounter = 0;
6431 handle->internalDrain = false;
6432 ResetEvent( handle->condition );
6433 stream_.state = STREAM_RUNNING;
6436 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream: optionally let the output drain (by signaling
// the callback via drainCounter and waiting on the condition event),
// then stop both DirectSound buffers, zero their contents so a restart
// does not replay stale data, and reset the buffer pointers.
// Fix: the original called RtApi::startStream() here, which is the
// wrong base method for a stop path (it re-stamps the stream-time
// tick); the open-stream check verifyStream() is intended.
6439 void RtApiDs :: stopStream()
6442 verifyStream();
6443 if ( stream_.state == STREAM_STOPPED ) {
6444 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6445 error( RtAudioError::WARNING );
6452 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6453 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If not already draining, ask the callback thread to drain (write
// zeros) and block until it signals the condition event.
6454 if ( handle->drainCounter == 0 ) {
6455 handle->drainCounter = 2;
6456 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6459 stream_.state = STREAM_STOPPED;
6461 MUTEX_LOCK( &stream_.mutex );
6463 // Stop the buffer and clear memory
6464 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6465 result = buffer->Stop();
6466 if ( FAILED( result ) ) {
6467 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6468 errorText_ = errorStream_.str();
6472 // Lock the buffer and clear it so that if we start to play again,
6473 // we won't have old data playing.
6474 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6475 if ( FAILED( result ) ) {
6476 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6477 errorText_ = errorStream_.str();
6481 // Zero the DS buffer
6482 ZeroMemory( audioPtr, dataLen );
6484 // Unlock the DS buffer
6485 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6486 if ( FAILED( result ) ) {
6487 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6488 errorText_ = errorStream_.str();
6492 // If we start playing again, we must begin at beginning of buffer.
6493 handle->bufferPointer[0] = 0;
6496 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6497 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6501 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above.
6503 if ( stream_.mode != DUPLEX )
6504 MUTEX_LOCK( &stream_.mutex );
6506 result = buffer->Stop();
6507 if ( FAILED( result ) ) {
6508 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6509 errorText_ = errorStream_.str();
6513 // Lock the buffer and clear it so that if we start to play again,
6514 // we won't have old data playing.
6515 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6516 if ( FAILED( result ) ) {
6517 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6518 errorText_ = errorStream_.str();
6522 // Zero the DS buffer
6523 ZeroMemory( audioPtr, dataLen );
6525 // Unlock the DS buffer
6526 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6527 if ( FAILED( result ) ) {
6528 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6529 errorText_ = errorStream_.str();
6533 // If we start recording again, we must begin at beginning of buffer.
6534 handle->bufferPointer[1] = 0;
6538 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6539 MUTEX_UNLOCK( &stream_.mutex );
6541 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream immediately.  Setting drainCounter to 2 tells
// callbackEvent to write zeros to the output instead of draining user
// data (see the drainCounter > 1 branch there).  Warns (does not throw)
// if the stream is already stopped.
6544 void RtApiDs :: abortStream()
6547 if ( stream_.state == STREAM_STOPPED ) {
6548 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6549 error( RtAudioError::WARNING );
6553 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6554 handle->drainCounter = 2;
6559 void RtApiDs :: callbackEvent()
6561 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6562 Sleep( 50 ); // sleep 50 milliseconds
6566 if ( stream_.state == STREAM_CLOSED ) {
6567 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6568 error( RtAudioError::WARNING );
6572 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6573 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6575 // Check if we were draining the stream and signal is finished.
6576 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6578 stream_.state = STREAM_STOPPING;
6579 if ( handle->internalDrain == false )
6580 SetEvent( handle->condition );
6586 // Invoke user callback to get fresh output data UNLESS we are
6588 if ( handle->drainCounter == 0 ) {
6589 RtAudioCallback callback = (RtAudioCallback) info->callback;
6590 double streamTime = getStreamTime();
6591 RtAudioStreamStatus status = 0;
6592 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6593 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6594 handle->xrun[0] = false;
6596 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6597 status |= RTAUDIO_INPUT_OVERFLOW;
6598 handle->xrun[1] = false;
6600 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6601 stream_.bufferSize, streamTime, status, info->userData );
6602 if ( cbReturnValue == 2 ) {
6603 stream_.state = STREAM_STOPPING;
6604 handle->drainCounter = 2;
6608 else if ( cbReturnValue == 1 ) {
6609 handle->drainCounter = 1;
6610 handle->internalDrain = true;
6615 DWORD currentWritePointer, safeWritePointer;
6616 DWORD currentReadPointer, safeReadPointer;
6617 UINT nextWritePointer;
6619 LPVOID buffer1 = NULL;
6620 LPVOID buffer2 = NULL;
6621 DWORD bufferSize1 = 0;
6622 DWORD bufferSize2 = 0;
6627 MUTEX_LOCK( &stream_.mutex );
6628 if ( stream_.state == STREAM_STOPPED ) {
6629 MUTEX_UNLOCK( &stream_.mutex );
6633 if ( buffersRolling == false ) {
6634 if ( stream_.mode == DUPLEX ) {
6635 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6637 // It takes a while for the devices to get rolling. As a result,
6638 // there's no guarantee that the capture and write device pointers
6639 // will move in lockstep. Wait here for both devices to start
6640 // rolling, and then set our buffer pointers accordingly.
6641 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6642 // bytes later than the write buffer.
6644 // Stub: a serious risk of having a pre-emptive scheduling round
6645 // take place between the two GetCurrentPosition calls... but I'm
6646 // really not sure how to solve the problem. Temporarily boost to
6647 // Realtime priority, maybe; but I'm not sure what priority the
6648 // DirectSound service threads run at. We *should* be roughly
6649 // within a ms or so of correct.
6651 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6652 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6654 DWORD startSafeWritePointer, startSafeReadPointer;
6656 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6657 if ( FAILED( result ) ) {
6658 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6659 errorText_ = errorStream_.str();
6660 MUTEX_UNLOCK( &stream_.mutex );
6661 error( RtAudioError::SYSTEM_ERROR );
6664 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6665 if ( FAILED( result ) ) {
6666 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6667 errorText_ = errorStream_.str();
6668 MUTEX_UNLOCK( &stream_.mutex );
6669 error( RtAudioError::SYSTEM_ERROR );
6673 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6674 if ( FAILED( result ) ) {
6675 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6676 errorText_ = errorStream_.str();
6677 MUTEX_UNLOCK( &stream_.mutex );
6678 error( RtAudioError::SYSTEM_ERROR );
6681 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6682 if ( FAILED( result ) ) {
6683 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6684 errorText_ = errorStream_.str();
6685 MUTEX_UNLOCK( &stream_.mutex );
6686 error( RtAudioError::SYSTEM_ERROR );
6689 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6693 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6695 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6696 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6697 handle->bufferPointer[1] = safeReadPointer;
6699 else if ( stream_.mode == OUTPUT ) {
6701 // Set the proper nextWritePosition after initial startup.
6702 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6703 result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6704 if ( FAILED( result ) ) {
6705 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6706 errorText_ = errorStream_.str();
6707 MUTEX_UNLOCK( &stream_.mutex );
6708 error( RtAudioError::SYSTEM_ERROR );
6711 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6712 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6715 buffersRolling = true;
6718 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6720 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6722 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6723 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6724 bufferBytes *= formatBytes( stream_.userFormat );
6725 memset( stream_.userBuffer[0], 0, bufferBytes );
6728 // Setup parameters and do buffer conversion if necessary.
6729 if ( stream_.doConvertBuffer[0] ) {
6730 buffer = stream_.deviceBuffer;
6731 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6732 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6733 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6736 buffer = stream_.userBuffer[0];
6737 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6738 bufferBytes *= formatBytes( stream_.userFormat );
6741 // No byte swapping necessary in DirectSound implementation.
6743 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6744 // unsigned. So, we need to convert our signed 8-bit data here to
6746 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6747 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6749 DWORD dsBufferSize = handle->dsBufferSize[0];
6750 nextWritePointer = handle->bufferPointer[0];
6752 DWORD endWrite, leadPointer;
6754 // Find out where the read and "safe write" pointers are.
6755 result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6756 if ( FAILED( result ) ) {
6757 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6758 errorText_ = errorStream_.str();
6759 MUTEX_UNLOCK( &stream_.mutex );
6760 error( RtAudioError::SYSTEM_ERROR );
6764 // We will copy our output buffer into the region between
6765 // safeWritePointer and leadPointer. If leadPointer is not
6766 // beyond the next endWrite position, wait until it is.
6767 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6768 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6769 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6770 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6771 endWrite = nextWritePointer + bufferBytes;
6773 // Check whether the entire write region is behind the play pointer.
6774 if ( leadPointer >= endWrite ) break;
6776 // If we are here, then we must wait until the leadPointer advances
6777 // beyond the end of our next write region. We use the
6778 // Sleep() function to suspend operation until that happens.
6779 double millis = ( endWrite - leadPointer ) * 1000.0;
6780 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6781 if ( millis < 1.0 ) millis = 1.0;
6782 Sleep( (DWORD) millis );
6785 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6786 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6787 // We've strayed into the forbidden zone ... resync the read pointer.
6788 handle->xrun[0] = true;
6789 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6790 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6791 handle->bufferPointer[0] = nextWritePointer;
6792 endWrite = nextWritePointer + bufferBytes;
6795 // Lock free space in the buffer
6796 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6797 &bufferSize1, &buffer2, &bufferSize2, 0 );
6798 if ( FAILED( result ) ) {
6799 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6800 errorText_ = errorStream_.str();
6801 MUTEX_UNLOCK( &stream_.mutex );
6802 error( RtAudioError::SYSTEM_ERROR );
6806 // Copy our buffer into the DS buffer
6807 CopyMemory( buffer1, buffer, bufferSize1 );
6808 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6810 // Update our buffer offset and unlock sound buffer
6811 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6812 if ( FAILED( result ) ) {
6813 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6814 errorText_ = errorStream_.str();
6815 MUTEX_UNLOCK( &stream_.mutex );
6816 error( RtAudioError::SYSTEM_ERROR );
6819 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6820 handle->bufferPointer[0] = nextWritePointer;
6823 // Don't bother draining input
6824 if ( handle->drainCounter ) {
6825 handle->drainCounter++;
6829 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6831 // Setup parameters.
6832 if ( stream_.doConvertBuffer[1] ) {
6833 buffer = stream_.deviceBuffer;
6834 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6835 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6838 buffer = stream_.userBuffer[1];
6839 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6840 bufferBytes *= formatBytes( stream_.userFormat );
6843 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6844 long nextReadPointer = handle->bufferPointer[1];
6845 DWORD dsBufferSize = handle->dsBufferSize[1];
6847 // Find out where the write and "safe read" pointers are.
6848 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6849 if ( FAILED( result ) ) {
6850 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6851 errorText_ = errorStream_.str();
6852 MUTEX_UNLOCK( &stream_.mutex );
6853 error( RtAudioError::SYSTEM_ERROR );
6857 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6858 DWORD endRead = nextReadPointer + bufferBytes;
6860 // Handling depends on whether we are INPUT or DUPLEX.
6861 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6862 // then a wait here will drag the write pointers into the forbidden zone.
6864 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6865 // it's in a safe position. This causes dropouts, but it seems to be the only
6866 // practical way to sync up the read and write pointers reliably, given the
6867 // the very complex relationship between phase and increment of the read and write
6870 // In order to minimize audible dropouts in DUPLEX mode, we will
6871 // provide a pre-roll period of 0.5 seconds in which we return
6872 // zeros from the read buffer while the pointers sync up.
6874 if ( stream_.mode == DUPLEX ) {
6875 if ( safeReadPointer < endRead ) {
6876 if ( duplexPrerollBytes <= 0 ) {
6877 // Pre-roll time over. Be more agressive.
6878 int adjustment = endRead-safeReadPointer;
6880 handle->xrun[1] = true;
6882 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6883 // and perform fine adjustments later.
6884 // - small adjustments: back off by twice as much.
6885 if ( adjustment >= 2*bufferBytes )
6886 nextReadPointer = safeReadPointer-2*bufferBytes;
6888 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6890 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6894 // In pre=roll time. Just do it.
6895 nextReadPointer = safeReadPointer - bufferBytes;
6896 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6898 endRead = nextReadPointer + bufferBytes;
6901 else { // mode == INPUT
6902 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6903 // See comments for playback.
6904 double millis = (endRead - safeReadPointer) * 1000.0;
6905 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6906 if ( millis < 1.0 ) millis = 1.0;
6907 Sleep( (DWORD) millis );
6909 // Wake up and find out where we are now.
6910 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6911 if ( FAILED( result ) ) {
6912 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6913 errorText_ = errorStream_.str();
6914 MUTEX_UNLOCK( &stream_.mutex );
6915 error( RtAudioError::SYSTEM_ERROR );
6919 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6923 // Lock free space in the buffer
6924 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6925 &bufferSize1, &buffer2, &bufferSize2, 0 );
6926 if ( FAILED( result ) ) {
6927 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6928 errorText_ = errorStream_.str();
6929 MUTEX_UNLOCK( &stream_.mutex );
6930 error( RtAudioError::SYSTEM_ERROR );
6934 if ( duplexPrerollBytes <= 0 ) {
6935 // Copy our buffer into the DS buffer
6936 CopyMemory( buffer, buffer1, bufferSize1 );
6937 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6940 memset( buffer, 0, bufferSize1 );
6941 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6942 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6945 // Update our buffer offset and unlock sound buffer
6946 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6947 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6948 if ( FAILED( result ) ) {
6949 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6950 errorText_ = errorStream_.str();
6951 MUTEX_UNLOCK( &stream_.mutex );
6952 error( RtAudioError::SYSTEM_ERROR );
6955 handle->bufferPointer[1] = nextReadPointer;
6957 // No byte swapping necessary in DirectSound implementation.
6959 // If necessary, convert 8-bit data from unsigned to signed.
6960 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6961 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6963 // Do buffer conversion if necessary.
6964 if ( stream_.doConvertBuffer[1] )
6965 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6969 MUTEX_UNLOCK( &stream_.mutex );
6970 RtApi::tickStreamTime();
6973 // Definitions for utility functions and callbacks
6974 // specific to the DirectSound implementation.
6976 static unsigned __stdcall callbackHandler( void *ptr )
6978 CallbackInfo *info = (CallbackInfo *) ptr;
6979 RtApiDs *object = (RtApiDs *) info->object;
6980 bool* isRunning = &info->isRunning;
6982 while ( *isRunning == true ) {
6983 object->callbackEvent();
6990 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6991 LPCTSTR description,
6995 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6996 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6999 bool validDevice = false;
7000 if ( probeInfo.isInput == true ) {
7002 LPDIRECTSOUNDCAPTURE object;
7004 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
7005 if ( hr != DS_OK ) return TRUE;
7007 caps.dwSize = sizeof(caps);
7008 hr = object->GetCaps( &caps );
7009 if ( hr == DS_OK ) {
7010 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7017 LPDIRECTSOUND object;
7018 hr = DirectSoundCreate( lpguid, &object, NULL );
7019 if ( hr != DS_OK ) return TRUE;
7021 caps.dwSize = sizeof(caps);
7022 hr = object->GetCaps( &caps );
7023 if ( hr == DS_OK ) {
7024 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7030 // If good device, then save its name and guid.
7031 std::string name = convertCharPointerToStdString( description );
7032 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
7033 if ( lpguid == NULL )
7034 name = "Default Device";
7035 if ( validDevice ) {
7036 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7037 if ( dsDevices[i].name == name ) {
7038 dsDevices[i].found = true;
7039 if ( probeInfo.isInput ) {
7040 dsDevices[i].id[1] = lpguid;
7041 dsDevices[i].validId[1] = true;
7044 dsDevices[i].id[0] = lpguid;
7045 dsDevices[i].validId[0] = true;
7053 device.found = true;
7054 if ( probeInfo.isInput ) {
7055 device.id[1] = lpguid;
7056 device.validId[1] = true;
7059 device.id[0] = lpguid;
7060 device.validId[0] = true;
7062 dsDevices.push_back( device );
7068 static const char* getErrorString( int code )
7072 case DSERR_ALLOCATED:
7073 return "Already allocated";
7075 case DSERR_CONTROLUNAVAIL:
7076 return "Control unavailable";
7078 case DSERR_INVALIDPARAM:
7079 return "Invalid parameter";
7081 case DSERR_INVALIDCALL:
7082 return "Invalid call";
7085 return "Generic error";
7087 case DSERR_PRIOLEVELNEEDED:
7088 return "Priority level needed";
7090 case DSERR_OUTOFMEMORY:
7091 return "Out of memory";
7093 case DSERR_BADFORMAT:
7094 return "The sample rate or the channel format is not supported";
7096 case DSERR_UNSUPPORTED:
7097 return "Not supported";
7099 case DSERR_NODRIVER:
7102 case DSERR_ALREADYINITIALIZED:
7103 return "Already initialized";
7105 case DSERR_NOAGGREGATION:
7106 return "No aggregation";
7108 case DSERR_BUFFERLOST:
7109 return "Buffer lost";
7111 case DSERR_OTHERAPPHASPRIO:
7112 return "Another application already has priority";
7114 case DSERR_UNINITIALIZED:
7115 return "Uninitialized";
7118 return "DirectSound unknown error";
7121 //******************** End of __WINDOWS_DS__ *********************//
7125 #if defined(__LINUX_ALSA__)
7127 #include <alsa/asoundlib.h>
7130 // A structure to hold various information related to the ALSA API
7133 snd_pcm_t *handles[2];
7136 pthread_cond_t runnable_cv;
7140 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7143 static void *alsaCallbackHandler( void * ptr );
7145 RtApiAlsa :: RtApiAlsa()
7147 // Nothing to do here.
7150 RtApiAlsa :: ~RtApiAlsa()
7152 if ( stream_.state != STREAM_CLOSED ) closeStream();
7155 unsigned int RtApiAlsa :: getDeviceCount( void )
7157 unsigned nDevices = 0;
7158 int result, subdevice, card;
7160 snd_ctl_t *handle = 0;
7162 // Count cards and devices
7164 snd_card_next( &card );
7165 while ( card >= 0 ) {
7166 sprintf( name, "hw:%d", card );
7167 result = snd_ctl_open( &handle, name, 0 );
7170 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7171 errorText_ = errorStream_.str();
7172 error( RtAudioError::WARNING );
7177 result = snd_ctl_pcm_next_device( handle, &subdevice );
7179 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7180 errorText_ = errorStream_.str();
7181 error( RtAudioError::WARNING );
7184 if ( subdevice < 0 )
7190 snd_ctl_close( handle );
7191 snd_card_next( &card );
7194 result = snd_ctl_open( &handle, "default", 0 );
7197 snd_ctl_close( handle );
7203 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7205 RtAudio::DeviceInfo info;
7206 info.probed = false;
7208 unsigned nDevices = 0;
7209 int result, subdevice, card;
7211 snd_ctl_t *chandle = 0;
7213 // Count cards and devices
7216 snd_card_next( &card );
7217 while ( card >= 0 ) {
7218 sprintf( name, "hw:%d", card );
7219 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7222 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7223 errorText_ = errorStream_.str();
7224 error( RtAudioError::WARNING );
7229 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7231 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7232 errorText_ = errorStream_.str();
7233 error( RtAudioError::WARNING );
7236 if ( subdevice < 0 ) break;
7237 if ( nDevices == device ) {
7238 sprintf( name, "hw:%d,%d", card, subdevice );
7245 snd_ctl_close( chandle );
7246 snd_card_next( &card );
7249 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7250 if ( result == 0 ) {
7251 if ( nDevices == device ) {
7252 strcpy( name, "default" );
7258 if ( nDevices == 0 ) {
7259 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7260 error( RtAudioError::INVALID_USE );
7264 if ( device >= nDevices ) {
7265 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7266 error( RtAudioError::INVALID_USE );
7272 // If a stream is already open, we cannot probe the stream devices.
7273 // Thus, use the saved results.
7274 if ( stream_.state != STREAM_CLOSED &&
7275 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7276 snd_ctl_close( chandle );
7277 if ( device >= devices_.size() ) {
7278 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7279 error( RtAudioError::WARNING );
7282 return devices_[ device ];
7285 int openMode = SND_PCM_ASYNC;
7286 snd_pcm_stream_t stream;
7287 snd_pcm_info_t *pcminfo;
7288 snd_pcm_info_alloca( &pcminfo );
7290 snd_pcm_hw_params_t *params;
7291 snd_pcm_hw_params_alloca( ¶ms );
7293 // First try for playback unless default device (which has subdev -1)
7294 stream = SND_PCM_STREAM_PLAYBACK;
7295 snd_pcm_info_set_stream( pcminfo, stream );
7296 if ( subdevice != -1 ) {
7297 snd_pcm_info_set_device( pcminfo, subdevice );
7298 snd_pcm_info_set_subdevice( pcminfo, 0 );
7300 result = snd_ctl_pcm_info( chandle, pcminfo );
7302 // Device probably doesn't support playback.
7307 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7309 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7310 errorText_ = errorStream_.str();
7311 error( RtAudioError::WARNING );
7315 // The device is open ... fill the parameter structure.
7316 result = snd_pcm_hw_params_any( phandle, params );
7318 snd_pcm_close( phandle );
7319 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7320 errorText_ = errorStream_.str();
7321 error( RtAudioError::WARNING );
7325 // Get output channel information.
7327 result = snd_pcm_hw_params_get_channels_max( params, &value );
7329 snd_pcm_close( phandle );
7330 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7331 errorText_ = errorStream_.str();
7332 error( RtAudioError::WARNING );
7335 info.outputChannels = value;
7336 snd_pcm_close( phandle );
7339 stream = SND_PCM_STREAM_CAPTURE;
7340 snd_pcm_info_set_stream( pcminfo, stream );
7342 // Now try for capture unless default device (with subdev = -1)
7343 if ( subdevice != -1 ) {
7344 result = snd_ctl_pcm_info( chandle, pcminfo );
7345 snd_ctl_close( chandle );
7347 // Device probably doesn't support capture.
7348 if ( info.outputChannels == 0 ) return info;
7349 goto probeParameters;
7353 snd_ctl_close( chandle );
7355 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7357 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7358 errorText_ = errorStream_.str();
7359 error( RtAudioError::WARNING );
7360 if ( info.outputChannels == 0 ) return info;
7361 goto probeParameters;
7364 // The device is open ... fill the parameter structure.
7365 result = snd_pcm_hw_params_any( phandle, params );
7367 snd_pcm_close( phandle );
7368 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7369 errorText_ = errorStream_.str();
7370 error( RtAudioError::WARNING );
7371 if ( info.outputChannels == 0 ) return info;
7372 goto probeParameters;
7375 result = snd_pcm_hw_params_get_channels_max( params, &value );
7377 snd_pcm_close( phandle );
7378 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7379 errorText_ = errorStream_.str();
7380 error( RtAudioError::WARNING );
7381 if ( info.outputChannels == 0 ) return info;
7382 goto probeParameters;
7384 info.inputChannels = value;
7385 snd_pcm_close( phandle );
7387 // If device opens for both playback and capture, we determine the channels.
7388 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7389 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7391 // ALSA doesn't provide default devices so we'll use the first available one.
7392 if ( device == 0 && info.outputChannels > 0 )
7393 info.isDefaultOutput = true;
7394 if ( device == 0 && info.inputChannels > 0 )
7395 info.isDefaultInput = true;
7398 // At this point, we just need to figure out the supported data
7399 // formats and sample rates. We'll proceed by opening the device in
7400 // the direction with the maximum number of channels, or playback if
7401 // they are equal. This might limit our sample rate options, but so
7404 if ( info.outputChannels >= info.inputChannels )
7405 stream = SND_PCM_STREAM_PLAYBACK;
7407 stream = SND_PCM_STREAM_CAPTURE;
7408 snd_pcm_info_set_stream( pcminfo, stream );
7410 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7412 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7413 errorText_ = errorStream_.str();
7414 error( RtAudioError::WARNING );
7418 // The device is open ... fill the parameter structure.
7419 result = snd_pcm_hw_params_any( phandle, params );
7421 snd_pcm_close( phandle );
7422 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7423 errorText_ = errorStream_.str();
7424 error( RtAudioError::WARNING );
7428 // Test our discrete set of sample rate values.
7429 info.sampleRates.clear();
7430 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7431 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7432 info.sampleRates.push_back( SAMPLE_RATES[i] );
7434 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7435 info.preferredSampleRate = SAMPLE_RATES[i];
7438 if ( info.sampleRates.size() == 0 ) {
7439 snd_pcm_close( phandle );
7440 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7441 errorText_ = errorStream_.str();
7442 error( RtAudioError::WARNING );
7446 // Probe the supported data formats ... we don't care about endian-ness just yet
7447 snd_pcm_format_t format;
7448 info.nativeFormats = 0;
7449 format = SND_PCM_FORMAT_S8;
7450 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7451 info.nativeFormats |= RTAUDIO_SINT8;
7452 format = SND_PCM_FORMAT_S16;
7453 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7454 info.nativeFormats |= RTAUDIO_SINT16;
7455 format = SND_PCM_FORMAT_S24;
7456 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7457 info.nativeFormats |= RTAUDIO_SINT24;
7458 format = SND_PCM_FORMAT_S32;
7459 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7460 info.nativeFormats |= RTAUDIO_SINT32;
7461 format = SND_PCM_FORMAT_FLOAT;
7462 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7463 info.nativeFormats |= RTAUDIO_FLOAT32;
7464 format = SND_PCM_FORMAT_FLOAT64;
7465 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7466 info.nativeFormats |= RTAUDIO_FLOAT64;
7468 // Check that we have at least one supported format
7469 if ( info.nativeFormats == 0 ) {
7470 snd_pcm_close( phandle );
7471 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7472 errorText_ = errorStream_.str();
7473 error( RtAudioError::WARNING );
7477 // Get the device name
7479 result = snd_card_get_name( card, &cardname );
7480 if ( result >= 0 ) {
7481 sprintf( name, "hw:%s,%d", cardname, subdevice );
7486 // That's all ... close the device and return
7487 snd_pcm_close( phandle );
7492 void RtApiAlsa :: saveDeviceInfo( void )
7496 unsigned int nDevices = getDeviceCount();
7497 devices_.resize( nDevices );
7498 for ( unsigned int i=0; i<nDevices; i++ )
7499 devices_[i] = getDeviceInfo( i );
7502 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7503 unsigned int firstChannel, unsigned int sampleRate,
7504 RtAudioFormat format, unsigned int *bufferSize,
7505 RtAudio::StreamOptions *options )
7508 #if defined(__RTAUDIO_DEBUG__)
7510 snd_output_stdio_attach(&out, stderr, 0);
7513 // I'm not using the "plug" interface ... too much inconsistent behavior.
7515 unsigned nDevices = 0;
7516 int result, subdevice, card;
7520 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7521 snprintf(name, sizeof(name), "%s", "default");
7523 // Count cards and devices
7525 snd_card_next( &card );
7526 while ( card >= 0 ) {
7527 sprintf( name, "hw:%d", card );
7528 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7530 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7531 errorText_ = errorStream_.str();
7536 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7537 if ( result < 0 ) break;
7538 if ( subdevice < 0 ) break;
7539 if ( nDevices == device ) {
7540 sprintf( name, "hw:%d,%d", card, subdevice );
7541 snd_ctl_close( chandle );
7546 snd_ctl_close( chandle );
7547 snd_card_next( &card );
7550 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7551 if ( result == 0 ) {
7552 if ( nDevices == device ) {
7553 strcpy( name, "default" );
7554 snd_ctl_close( chandle );
7559 snd_ctl_close( chandle );
7561 if ( nDevices == 0 ) {
7562 // This should not happen because a check is made before this function is called.
7563 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7567 if ( device >= nDevices ) {
7568 // This should not happen because a check is made before this function is called.
7569 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7576 // The getDeviceInfo() function will not work for a device that is
7577 // already open. Thus, we'll probe the system before opening a
7578 // stream and save the results for use by getDeviceInfo().
7579 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7580 this->saveDeviceInfo();
7582 snd_pcm_stream_t stream;
7583 if ( mode == OUTPUT )
7584 stream = SND_PCM_STREAM_PLAYBACK;
7586 stream = SND_PCM_STREAM_CAPTURE;
7589 int openMode = SND_PCM_ASYNC;
7590 result = snd_pcm_open( &phandle, name, stream, openMode );
7592 if ( mode == OUTPUT )
7593 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7595 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7596 errorText_ = errorStream_.str();
7600 // Fill the parameter structure.
7601 snd_pcm_hw_params_t *hw_params;
7602 snd_pcm_hw_params_alloca( &hw_params );
7603 result = snd_pcm_hw_params_any( phandle, hw_params );
7605 snd_pcm_close( phandle );
7606 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7607 errorText_ = errorStream_.str();
7611 #if defined(__RTAUDIO_DEBUG__)
7612 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7613 snd_pcm_hw_params_dump( hw_params, out );
7616 // Set access ... check user preference.
7617 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7618 stream_.userInterleaved = false;
7619 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7621 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7622 stream_.deviceInterleaved[mode] = true;
7625 stream_.deviceInterleaved[mode] = false;
7628 stream_.userInterleaved = true;
7629 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7631 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7632 stream_.deviceInterleaved[mode] = false;
7635 stream_.deviceInterleaved[mode] = true;
7639 snd_pcm_close( phandle );
7640 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7641 errorText_ = errorStream_.str();
7645 // Determine how to set the device format.
7646 stream_.userFormat = format;
7647 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7649 if ( format == RTAUDIO_SINT8 )
7650 deviceFormat = SND_PCM_FORMAT_S8;
7651 else if ( format == RTAUDIO_SINT16 )
7652 deviceFormat = SND_PCM_FORMAT_S16;
7653 else if ( format == RTAUDIO_SINT24 )
7654 deviceFormat = SND_PCM_FORMAT_S24;
7655 else if ( format == RTAUDIO_SINT32 )
7656 deviceFormat = SND_PCM_FORMAT_S32;
7657 else if ( format == RTAUDIO_FLOAT32 )
7658 deviceFormat = SND_PCM_FORMAT_FLOAT;
7659 else if ( format == RTAUDIO_FLOAT64 )
7660 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7662 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7663 stream_.deviceFormat[mode] = format;
7667 // The user requested format is not natively supported by the device.
7668 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7669 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7670 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7674 deviceFormat = SND_PCM_FORMAT_FLOAT;
7675 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7676 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7680 deviceFormat = SND_PCM_FORMAT_S32;
7681 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7682 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7686 deviceFormat = SND_PCM_FORMAT_S24;
7687 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7688 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7692 deviceFormat = SND_PCM_FORMAT_S16;
7693 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7694 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7698 deviceFormat = SND_PCM_FORMAT_S8;
7699 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7700 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7704 // If we get here, no supported format was found.
7705 snd_pcm_close( phandle );
7706 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7707 errorText_ = errorStream_.str();
7711 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7713 snd_pcm_close( phandle );
7714 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7715 errorText_ = errorStream_.str();
7719 // Determine whether byte-swaping is necessary.
7720 stream_.doByteSwap[mode] = false;
7721 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7722 result = snd_pcm_format_cpu_endian( deviceFormat );
7724 stream_.doByteSwap[mode] = true;
7725 else if (result < 0) {
7726 snd_pcm_close( phandle );
7727 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7728 errorText_ = errorStream_.str();
7733 // Set the sample rate.
7734 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7736 snd_pcm_close( phandle );
7737 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7738 errorText_ = errorStream_.str();
7742 // Determine the number of channels for this device. We support a possible
7743 // minimum device channel number > than the value requested by the user.
7744 stream_.nUserChannels[mode] = channels;
7746 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7747 unsigned int deviceChannels = value;
7748 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7749 snd_pcm_close( phandle );
7750 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7751 errorText_ = errorStream_.str();
7755 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7757 snd_pcm_close( phandle );
7758 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7759 errorText_ = errorStream_.str();
7762 deviceChannels = value;
7763 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7764 stream_.nDeviceChannels[mode] = deviceChannels;
7766 // Set the device channels.
7767 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7769 snd_pcm_close( phandle );
7770 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7771 errorText_ = errorStream_.str();
7775 // Set the buffer (or period) size.
7777 snd_pcm_uframes_t periodSize = *bufferSize;
7778 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7780 snd_pcm_close( phandle );
7781 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7782 errorText_ = errorStream_.str();
7785 *bufferSize = periodSize;
7787 // Set the buffer number, which in ALSA is referred to as the "period".
7788 unsigned int periods = 0;
7789 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7790 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7791 if ( periods < 2 ) periods = 4; // a fairly safe default value
7792 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7794 snd_pcm_close( phandle );
7795 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7796 errorText_ = errorStream_.str();
7800 // If attempting to setup a duplex stream, the bufferSize parameter
7801 // MUST be the same in both directions!
7802 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7803 snd_pcm_close( phandle );
7804 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7805 errorText_ = errorStream_.str();
7809 stream_.bufferSize = *bufferSize;
7811 // Install the hardware configuration
7812 result = snd_pcm_hw_params( phandle, hw_params );
7814 snd_pcm_close( phandle );
7815 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7816 errorText_ = errorStream_.str();
7820 #if defined(__RTAUDIO_DEBUG__)
7821 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7822 snd_pcm_hw_params_dump( hw_params, out );
7825 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7826 snd_pcm_sw_params_t *sw_params = NULL;
7827 snd_pcm_sw_params_alloca( &sw_params );
7828 snd_pcm_sw_params_current( phandle, sw_params );
7829 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7830 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7831 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7833 // The following two settings were suggested by Theo Veenker
7834 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7835 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7837 // here are two options for a fix
7838 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7839 snd_pcm_uframes_t val;
7840 snd_pcm_sw_params_get_boundary( sw_params, &val );
7841 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7843 result = snd_pcm_sw_params( phandle, sw_params );
7845 snd_pcm_close( phandle );
7846 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7847 errorText_ = errorStream_.str();
7851 #if defined(__RTAUDIO_DEBUG__)
7852 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7853 snd_pcm_sw_params_dump( sw_params, out );
7856 // Set flags for buffer conversion
7857 stream_.doConvertBuffer[mode] = false;
7858 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7859 stream_.doConvertBuffer[mode] = true;
7860 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7861 stream_.doConvertBuffer[mode] = true;
7862 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7863 stream_.nUserChannels[mode] > 1 )
7864 stream_.doConvertBuffer[mode] = true;
7866 // Allocate the ApiHandle if necessary and then save.
7867 AlsaHandle *apiInfo = 0;
7868 if ( stream_.apiHandle == 0 ) {
7870 apiInfo = (AlsaHandle *) new AlsaHandle;
7872 catch ( std::bad_alloc& ) {
7873 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7877 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7878 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7882 stream_.apiHandle = (void *) apiInfo;
7883 apiInfo->handles[0] = 0;
7884 apiInfo->handles[1] = 0;
7887 apiInfo = (AlsaHandle *) stream_.apiHandle;
7889 apiInfo->handles[mode] = phandle;
7892 // Allocate necessary internal buffers.
7893 unsigned long bufferBytes;
7894 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7895 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7896 if ( stream_.userBuffer[mode] == NULL ) {
7897 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7901 if ( stream_.doConvertBuffer[mode] ) {
7903 bool makeBuffer = true;
7904 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7905 if ( mode == INPUT ) {
7906 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7907 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7908 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7913 bufferBytes *= *bufferSize;
7914 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7915 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7916 if ( stream_.deviceBuffer == NULL ) {
7917 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7923 stream_.sampleRate = sampleRate;
7924 stream_.nBuffers = periods;
7925 stream_.device[mode] = device;
7926 stream_.state = STREAM_STOPPED;
7928 // Setup the buffer conversion information structure.
7929 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7931 // Setup thread if necessary.
7932 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7933 // We had already set up an output stream.
7934 stream_.mode = DUPLEX;
7935 // Link the streams if possible.
7936 apiInfo->synchronized = false;
7937 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7938 apiInfo->synchronized = true;
7940 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7941 error( RtAudioError::WARNING );
7945 stream_.mode = mode;
7947 // Setup callback thread.
7948 stream_.callbackInfo.object = (void *) this;
7950 // Set the thread attributes for joinable and realtime scheduling
7951 // priority (optional). The higher priority will only take affect
7952 // if the program is run as root or suid. Note, under Linux
7953 // processes with CAP_SYS_NICE privilege, a user can change
7954 // scheduling policy and priority (thus need not be root). See
7955 // POSIX "capabilities".
7956 pthread_attr_t attr;
7957 pthread_attr_init( &attr );
7958 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7959 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7960 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7961 stream_.callbackInfo.doRealtime = true;
7962 struct sched_param param;
7963 int priority = options->priority;
7964 int min = sched_get_priority_min( SCHED_RR );
7965 int max = sched_get_priority_max( SCHED_RR );
7966 if ( priority < min ) priority = min;
7967 else if ( priority > max ) priority = max;
7968 param.sched_priority = priority;
7970 // Set the policy BEFORE the priority. Otherwise it fails.
7971 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7972 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7973 // This is definitely required. Otherwise it fails.
7974 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7975 pthread_attr_setschedparam(&attr, ¶m);
7978 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7980 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7983 stream_.callbackInfo.isRunning = true;
7984 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7985 pthread_attr_destroy( &attr );
7987 // Failed. Try instead with default attributes.
7988 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7990 stream_.callbackInfo.isRunning = false;
7991 errorText_ = "RtApiAlsa::error creating callback thread!";
8001 pthread_cond_destroy( &apiInfo->runnable_cv );
8002 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8003 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8005 stream_.apiHandle = 0;
8008 if ( phandle) snd_pcm_close( phandle );
8010 for ( int i=0; i<2; i++ ) {
8011 if ( stream_.userBuffer[i] ) {
8012 free( stream_.userBuffer[i] );
8013 stream_.userBuffer[i] = 0;
8017 if ( stream_.deviceBuffer ) {
8018 free( stream_.deviceBuffer );
8019 stream_.deviceBuffer = 0;
8022 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: wake and join the callback thread, drop any
// in-flight PCM data, close both PCM handles, and release all buffers.
8026 void RtApiAlsa :: closeStream()
8028 if ( stream_.state == STREAM_CLOSED ) {
8029 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8030 error( RtAudioError::WARNING );
8034 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback thread to exit its loop, then wake it in case it is
// blocked on the runnable condition variable (stopped state).
8035 stream_.callbackInfo.isRunning = false;
8036 MUTEX_LOCK( &stream_.mutex );
8037 if ( stream_.state == STREAM_STOPPED ) {
8038 apiInfo->runnable = true;
8039 pthread_cond_signal( &apiInfo->runnable_cv );
8041 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing down handles.
8042 pthread_join( stream_.callbackInfo.thread, NULL );
8044 if ( stream_.state == STREAM_RUNNING ) {
8045 stream_.state = STREAM_STOPPED;
// snd_pcm_drop() discards pending frames immediately (no drain on close).
8046 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8047 snd_pcm_drop( apiInfo->handles[0] );
8048 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8049 snd_pcm_drop( apiInfo->handles[1] );
// Destroy the condition variable and close whichever PCM handles exist
// (output in handles[0], input in handles[1]).
8053 pthread_cond_destroy( &apiInfo->runnable_cv );
8054 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8055 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8057 stream_.apiHandle = 0;
// Free the per-mode user buffers and the shared device buffer.
8060 for ( int i=0; i<2; i++ ) {
8061 if ( stream_.userBuffer[i] ) {
8062 free( stream_.userBuffer[i] );
8063 stream_.userBuffer[i] = 0;
8067 if ( stream_.deviceBuffer ) {
8068 free( stream_.deviceBuffer );
8069 stream_.deviceBuffer = 0;
// Mark the stream fully closed.
8072 stream_.mode = UNINITIALIZED;
8073 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM device(s) if necessary,
// mark the stream running, and wake the waiting callback thread.
8076 void RtApiAlsa :: startStream()
8078 // This method calls snd_pcm_prepare if the device isn't already in that state.
8081 RtApi::startStream();
8082 if ( stream_.state == STREAM_RUNNING ) {
8083 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8084 error( RtAudioError::WARNING );
8088 MUTEX_LOCK( &stream_.mutex );
8090 #if defined( HAVE_GETTIMEOFDAY )
// Reset the stream-time reference so elapsed time is measured from now.
8091 gettimeofday( &stream_.lastTickTimestamp, NULL );
8095 snd_pcm_state_t state;
8096 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8097 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the output device (handles[0]) unless it is already prepared.
8098 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8099 state = snd_pcm_state( handle[0] );
8100 if ( state != SND_PCM_STATE_PREPARED ) {
8101 result = snd_pcm_prepare( handle[0] );
8103 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8104 errorText_ = errorStream_.str();
// For an input device that is not linked to the output, first drop stale
// captured data, then prepare it if necessary.
8110 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8111 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8112 state = snd_pcm_state( handle[1] );
8113 if ( state != SND_PCM_STATE_PREPARED ) {
8114 result = snd_pcm_prepare( handle[1] );
8116 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8117 errorText_ = errorStream_.str();
8123 stream_.state = STREAM_RUNNING;
// Wake the callback thread, which waits on runnable_cv while stopped.
8126 apiInfo->runnable = true;
8127 pthread_cond_signal( &apiInfo->runnable_cv );
8128 MUTEX_UNLOCK( &stream_.mutex );
8130 if ( result >= 0 ) return;
8131 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream, draining the output so already-queued
// samples finish playing (contrast with abortStream(), which drops them).
8134 void RtApiAlsa :: stopStream()
8137 if ( stream_.state == STREAM_STOPPED ) {
8138 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8139 error( RtAudioError::WARNING );
8143 stream_.state = STREAM_STOPPED;
8144 MUTEX_LOCK( &stream_.mutex );
8147 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8148 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8149 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) duplex handles are stopped together with drop;
// an unlinked output is drained so pending frames play out.
8150 if ( apiInfo->synchronized )
8151 result = snd_pcm_drop( handle[0] );
8153 result = snd_pcm_drain( handle[0] );
8155 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8156 errorText_ = errorStream_.str();
// Input capture data is simply dropped; there is nothing to drain.
8161 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8162 result = snd_pcm_drop( handle[1] );
8164 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8165 errorText_ = errorStream_.str();
8171 apiInfo->runnable = false; // fixes high CPU usage when stopped
8172 MUTEX_UNLOCK( &stream_.mutex );
8174 if ( result >= 0 ) return;
8175 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream: stop immediately, discarding any queued
// output samples (snd_pcm_drop on both directions).
8178 void RtApiAlsa :: abortStream()
8181 if ( stream_.state == STREAM_STOPPED ) {
8182 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8183 error( RtAudioError::WARNING );
8187 stream_.state = STREAM_STOPPED;
8188 MUTEX_LOCK( &stream_.mutex );
8191 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8192 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Drop (do not drain) the output so the stop takes effect immediately.
8193 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8194 result = snd_pcm_drop( handle[0] );
8196 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8197 errorText_ = errorStream_.str();
// Unlinked input is dropped separately; a linked input stops with handle[0].
8202 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8203 result = snd_pcm_drop( handle[1] );
8205 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8206 errorText_ = errorStream_.str();
8212 apiInfo->runnable = false; // fixes high CPU usage when stopped
8213 MUTEX_UNLOCK( &stream_.mutex );
8215 if ( result >= 0 ) return;
8216 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then read captured frames from and/or write rendered
// frames to the PCM device(s), handling xruns and buffer conversion.
8219 void RtApiAlsa :: callbackEvent()
8221 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on the condition variable until startStream() (or
// closeStream()) sets runnable and signals us.
8222 if ( stream_.state == STREAM_STOPPED ) {
8223 MUTEX_LOCK( &stream_.mutex );
8224 while ( !apiInfo->runnable )
8225 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8227 if ( stream_.state != STREAM_RUNNING ) {
8228 MUTEX_UNLOCK( &stream_.mutex );
8231 MUTEX_UNLOCK( &stream_.mutex );
8234 if ( stream_.state == STREAM_CLOSED ) {
8235 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8236 error( RtAudioError::WARNING );
8240 int doStopStream = 0;
8241 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8242 double streamTime = getStreamTime();
// Report (and clear) any xrun flags recorded by previous iterations:
// xrun[0] = output underflow, xrun[1] = input overflow.
8243 RtAudioStreamStatus status = 0;
8244 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8245 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8246 apiInfo->xrun[0] = false;
8248 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8249 status |= RTAUDIO_INPUT_OVERFLOW;
8250 apiInfo->xrun[1] = false;
// Invoke the user callback with the output and input user buffers.
8252 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8253 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
8255 if ( doStopStream == 2 ) {
8260 MUTEX_LOCK( &stream_.mutex );
8262 // The state might change while waiting on a mutex.
8263 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8269 snd_pcm_sframes_t frames;
8270 RtAudioFormat format;
8271 handle = (snd_pcm_t **) apiInfo->handles;
// ----- Input side: capture from handle[1] -----
8273 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8275 // Setup parameters.
// Read into the device buffer when a conversion (format/channels/
// interleaving) is needed afterwards; otherwise read straight into the
// user buffer.
8276 if ( stream_.doConvertBuffer[1] ) {
8277 buffer = stream_.deviceBuffer;
8278 channels = stream_.nDeviceChannels[1];
8279 format = stream_.deviceFormat[1];
8282 buffer = stream_.userBuffer[1];
8283 channels = stream_.nUserChannels[1];
8284 format = stream_.userFormat;
8287 // Read samples from device in interleaved/non-interleaved format.
8288 if ( stream_.deviceInterleaved[1] )
8289 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build a per-channel pointer array into the buffer.
8291 void *bufs[channels];
8292 size_t offset = stream_.bufferSize * formatBytes( format );
8293 for ( int i=0; i<channels; i++ )
8294 bufs[i] = (void *) (buffer + (i * offset));
8295 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8298 if ( result < (int) stream_.bufferSize ) {
8299 // Either an error or overrun occurred.
// -EPIPE signals an xrun: note it for the next callback's status flags
// and re-prepare the device so capture can continue.
8300 if ( result == -EPIPE ) {
8301 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8302 if ( state == SND_PCM_STATE_XRUN ) {
8303 apiInfo->xrun[1] = true;
8304 result = snd_pcm_prepare( handle[1] );
8306 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8307 errorText_ = errorStream_.str();
8311 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8312 errorText_ = errorStream_.str();
8316 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8317 errorText_ = errorStream_.str();
8319 error( RtAudioError::WARNING );
8323 // Do byte swapping if necessary.
8324 if ( stream_.doByteSwap[1] )
8325 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8327 // Do buffer conversion if necessary.
8328 if ( stream_.doConvertBuffer[1] )
8329 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8331 // Check stream latency
// snd_pcm_delay() reports the current device delay in frames.
8332 result = snd_pcm_delay( handle[1], &frames );
8333 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ----- Output side: playback through handle[0] -----
8338 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8340 // Setup parameters and do buffer conversion if necessary.
8341 if ( stream_.doConvertBuffer[0] ) {
8342 buffer = stream_.deviceBuffer;
8343 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8344 channels = stream_.nDeviceChannels[0];
8345 format = stream_.deviceFormat[0];
8348 buffer = stream_.userBuffer[0];
8349 channels = stream_.nUserChannels[0];
8350 format = stream_.userFormat;
8353 // Do byte swapping if necessary.
8354 if ( stream_.doByteSwap[0] )
8355 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8357 // Write samples to device in interleaved/non-interleaved format.
8358 if ( stream_.deviceInterleaved[0] )
8359 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: build a per-channel pointer array into the buffer.
8361 void *bufs[channels];
8362 size_t offset = stream_.bufferSize * formatBytes( format );
8363 for ( int i=0; i<channels; i++ )
8364 bufs[i] = (void *) (buffer + (i * offset));
8365 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8368 if ( result < (int) stream_.bufferSize ) {
8369 // Either an error or underrun occurred.
// -EPIPE signals an underrun: flag it and re-prepare so playback resumes.
8370 if ( result == -EPIPE ) {
8371 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8372 if ( state == SND_PCM_STATE_XRUN ) {
8373 apiInfo->xrun[0] = true;
8374 result = snd_pcm_prepare( handle[0] );
8376 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8377 errorText_ = errorStream_.str();
8380 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8383 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8384 errorText_ = errorStream_.str();
8388 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8389 errorText_ = errorStream_.str();
8391 error( RtAudioError::WARNING );
8395 // Check stream latency
8396 result = snd_pcm_delay( handle[0], &frames );
8397 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8401 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time by one buffer and honor a callback stop request.
8403 RtApi::tickStreamTime();
8404 if ( doStopStream == 1 ) this->stopStream();
// Entry point for the ALSA callback thread: repeatedly calls
// callbackEvent() until closeStream() clears the isRunning flag.
8407 static void *alsaCallbackHandler( void *ptr )
8409 CallbackInfo *info = (CallbackInfo *) ptr;
8410 RtApiAlsa *object = (RtApiAlsa *) info->object;
8411 bool *isRunning = &info->isRunning;
8413 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Report whether the realtime (SCHED_RR) scheduling requested at open
// time actually took effect for this thread.
8414 if ( info->doRealtime ) {
8415 std::cerr << "RtAudio alsa: " <<
8416 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8417 "running realtime scheduling" << std::endl;
// Offer a cancellation point each iteration, then run one buffer cycle.
8421 while ( *isRunning == true ) {
8422 pthread_testcancel();
8423 object->callbackEvent();
8426 pthread_exit( NULL );
8429 //******************** End of __LINUX_ALSA__ *********************//
8432 #if defined(__LINUX_PULSE__)
8434 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8435 // and Tristan Matthews.
8437 #include <pulse/error.h>
8438 #include <pulse/simple.h>
// Sample rates offered by the PulseAudio backend (zero-terminated list).
8441 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8442 44100, 48000, 96000, 0};
// Maps one RtAudio sample format to its PulseAudio equivalent.
8444 struct rtaudio_pa_format_mapping_t {
8445 RtAudioFormat rtaudio_format;
8446 pa_sample_format_t pa_format;
// Formats supported natively (little-endian); the {0, PA_SAMPLE_INVALID}
// entry terminates the table for iteration.
8449 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8450 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8451 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8452 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8453 {0, PA_SAMPLE_INVALID}};
// Per-stream PulseAudio state: the simple-API playback (s_play) and record
// (s_rec) connections plus the callback-thread synchronization members.
8455 struct PulseAudioHandle {
8459 pthread_cond_t runnable_cv;
8461 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: make sure any open stream is closed before destruction.
8464 RtApiPulse::~RtApiPulse()
8466 if ( stream_.state != STREAM_CLOSED )
// The PulseAudio backend exposes a single virtual device (the server);
// probeDeviceOpen() below rejects any device index other than 0.
8470 unsigned int RtApiPulse::getDeviceCount( void )
// Return a fixed description of the single PulseAudio virtual device:
// stereo in/out/duplex, default for both directions, a fixed rate list,
// and three native sample formats.
8475 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8477 RtAudio::DeviceInfo info;
8479 info.name = "PulseAudio";
8480 info.outputChannels = 2;
8481 info.inputChannels = 2;
8482 info.duplexChannels = 2;
8483 info.isDefaultOutput = true;
8484 info.isDefaultInput = true;
// Advertise every rate in the zero-terminated SUPPORTED_SAMPLERATES table.
8486 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8487 info.sampleRates.push_back( *sr );
8489 info.preferredSampleRate = 48000;
8490 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Entry point for the PulseAudio callback thread: repeatedly calls
// callbackEvent() until closeStream() clears the isRunning flag.
8496 static void *pulseaudio_callback( void * user )
8497 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8498 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8499 volatile bool *isRunning = &cbi->isRunning;
8501 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
// Report whether the realtime (SCHED_RR) scheduling requested at open
// time actually took effect for this thread.
8502 if (cbi->doRealtime) {
8503 std::cerr << "RtAudio pulse: " <<
8504 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8505 "running realtime scheduling" << std::endl;
// Offer a cancellation point each iteration, then run one buffer cycle.
8509 while ( *isRunning ) {
8510 pthread_testcancel();
8511 context->callbackEvent();
8514 pthread_exit( NULL );
// Close the PulseAudio stream: stop and join the callback thread, tear
// down the playback/record connections, and free the user buffers.
8517 void RtApiPulse::closeStream( void )
8519 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Tell the callback thread to exit, then wake it in case it is blocked
// waiting on the runnable condition (stopped state).
8521 stream_.callbackInfo.isRunning = false;
8523 MUTEX_LOCK( &stream_.mutex );
8524 if ( stream_.state == STREAM_STOPPED ) {
8525 pah->runnable = true;
8526 pthread_cond_signal( &pah->runnable_cv );
8528 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to finish before freeing the connections.
8530 pthread_join( pah->thread, 0 );
// Flush unplayed output, then free both simple-API connections.
8531 if ( pah->s_play ) {
8532 pa_simple_flush( pah->s_play, NULL );
8533 pa_simple_free( pah->s_play );
8536 pa_simple_free( pah->s_rec );
8538 pthread_cond_destroy( &pah->runnable_cv );
8540 stream_.apiHandle = 0;
// Release the output (index 0) and input (index 1) user buffers.
8543 if ( stream_.userBuffer[0] ) {
8544 free( stream_.userBuffer[0] );
8545 stream_.userBuffer[0] = 0;
8547 if ( stream_.userBuffer[1] ) {
8548 free( stream_.userBuffer[1] );
8549 stream_.userBuffer[1] = 0;
8552 stream_.state = STREAM_CLOSED;
8553 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped,
// invoke the user callback, then write/read one buffer via the simple API,
// converting between user and device formats when required.
8556 void RtApiPulse::callbackEvent( void )
8558 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on the condition variable until startStream() (or
// closeStream()) sets runnable and signals us.
8560 if ( stream_.state == STREAM_STOPPED ) {
8561 MUTEX_LOCK( &stream_.mutex );
8562 while ( !pah->runnable )
8563 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8565 if ( stream_.state != STREAM_RUNNING ) {
8566 MUTEX_UNLOCK( &stream_.mutex );
8569 MUTEX_UNLOCK( &stream_.mutex );
8572 if ( stream_.state == STREAM_CLOSED ) {
8573 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8574 "this shouldn't happen!";
8575 error( RtAudioError::WARNING );
// Invoke the user callback with the output and input user buffers.
8579 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8580 double streamTime = getStreamTime();
8581 RtAudioStreamStatus status = 0;
8582 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8583 stream_.bufferSize, streamTime, status,
8584 stream_.callbackInfo.userData );
8586 if ( doStopStream == 2 ) {
8591 MUTEX_LOCK( &stream_.mutex );
// Transfer through the device buffer when a conversion is needed,
// otherwise straight through the user buffers.
8592 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8593 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8595 if ( stream_.state != STREAM_RUNNING )
// ----- Output side: convert (if needed) and write one buffer -----
8600 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8601 if ( stream_.doConvertBuffer[OUTPUT] ) {
8602 convertBuffer( stream_.deviceBuffer,
8603 stream_.userBuffer[OUTPUT],
8604 stream_.convertInfo[OUTPUT] );
8605 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8606 formatBytes( stream_.deviceFormat[OUTPUT] );
8608 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8609 formatBytes( stream_.userFormat );
// pa_simple_write blocks until the whole buffer is accepted.
8611 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8612 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8613 pa_strerror( pa_error ) << ".";
8614 errorText_ = errorStream_.str();
8615 error( RtAudioError::WARNING );
// ----- Input side: read one buffer, then convert (if needed) -----
8619 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8620 if ( stream_.doConvertBuffer[INPUT] )
8621 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8622 formatBytes( stream_.deviceFormat[INPUT] );
8624 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8625 formatBytes( stream_.userFormat );
8627 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8628 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8629 pa_strerror( pa_error ) << ".";
8630 errorText_ = errorStream_.str();
8631 error( RtAudioError::WARNING );
8633 if ( stream_.doConvertBuffer[INPUT] ) {
8634 convertBuffer( stream_.userBuffer[INPUT],
8635 stream_.deviceBuffer,
8636 stream_.convertInfo[INPUT] );
8641 MUTEX_UNLOCK( &stream_.mutex );
8642 RtApi::tickStreamTime();
// Update output latency: pa_simple_get_latency() returns microseconds,
// converted here to frames at the stream sample rate.
8646 pa_usec_t const lat = pa_simple_get_latency(pah->s_play, &e);
8648 stream_.latency[0] = lat * stream_.sampleRate / 1000000;
// Honor a stop request (return value 1) from the user callback.
8652 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream: mark it running and wake the
// callback thread waiting on the runnable condition variable.
8656 void RtApiPulse::startStream( void )
8658 RtApi::startStream();
8659 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8661 if ( stream_.state == STREAM_CLOSED ) {
8662 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8663 error( RtAudioError::INVALID_USE );
8666 if ( stream_.state == STREAM_RUNNING ) {
8667 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8668 error( RtAudioError::WARNING );
8672 MUTEX_LOCK( &stream_.mutex );
8674 #if defined( HAVE_GETTIMEOFDAY )
// Reset the stream-time reference so elapsed time is measured from now.
8675 gettimeofday( &stream_.lastTickTimestamp, NULL );
8678 stream_.state = STREAM_RUNNING;
// Wake the callback thread blocked in callbackEvent().
8680 pah->runnable = true;
8681 pthread_cond_signal( &pah->runnable_cv );
8682 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running PulseAudio stream, draining playback so queued samples
// finish playing (contrast with abortStream(), which flushes them).
8687 void RtApiPulse::stopStream( void )
8689 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8691 if ( stream_.state == STREAM_CLOSED ) {
8692 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8693 error( RtAudioError::INVALID_USE );
8694 if ( stream_.state == STREAM_STOPPED ) {
8695 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8696 error( RtAudioError::WARNING );
// Mark stopped and clear runnable so the callback thread parks itself.
8700 stream_.state = STREAM_STOPPED;
8701 pah->runnable = false;
8702 MUTEX_LOCK( &stream_.mutex );
// Drain: block until already-written playback data has finished playing.
8704 if ( pah && pah->s_play ) {
8706 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8707 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8708 pa_strerror( pa_error ) << ".";
8709 errorText_ = errorStream_.str();
8710 MUTEX_UNLOCK( &stream_.mutex );
8711 error( RtAudioError::SYSTEM_ERROR );
8716 stream_.state = STREAM_STOPPED;
8717 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running PulseAudio stream: stop immediately, flushing (i.e.
// discarding) any queued playback data rather than draining it.
8721 void RtApiPulse::abortStream( void )
8722 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8724 if ( stream_.state == STREAM_CLOSED ) {
8725 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8726 error( RtAudioError::INVALID_USE );
8729 if ( stream_.state == STREAM_STOPPED ) {
8730 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8731 error( RtAudioError::WARNING );
// Mark stopped and clear runnable so the callback thread parks itself.
8735 stream_.state = STREAM_STOPPED;
8736 pah->runnable = false;
8737 MUTEX_LOCK( &stream_.mutex );
// Flush: discard queued playback data for an immediate stop.
8739 if ( pah && pah->s_play ) {
8741 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8742 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8743 pa_strerror( pa_error ) << ".";
8744 errorText_ = errorStream_.str();
8745 MUTEX_UNLOCK( &stream_.mutex );
8746 error( RtAudioError::SYSTEM_ERROR );
8751 stream_.state = STREAM_STOPPED;
8752 MUTEX_UNLOCK( &stream_.mutex );
8755 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8756 unsigned int channels, unsigned int firstChannel,
8757 unsigned int sampleRate, RtAudioFormat format,
8758 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8760 PulseAudioHandle *pah = 0;
8761 unsigned long bufferBytes = 0;
8764 if ( device != 0 ) return false;
8765 if ( mode != INPUT && mode != OUTPUT ) return false;
8766 if ( channels != 1 && channels != 2 ) {
8767 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8770 ss.channels = channels;
8772 if ( firstChannel != 0 ) return false;
8774 bool sr_found = false;
8775 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8776 if ( sampleRate == *sr ) {
8778 stream_.sampleRate = sampleRate;
8779 ss.rate = sampleRate;
8784 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8789 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8790 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8791 if ( format == sf->rtaudio_format ) {
8793 stream_.userFormat = sf->rtaudio_format;
8794 stream_.deviceFormat[mode] = stream_.userFormat;
8795 ss.format = sf->pa_format;
8799 if ( !sf_found ) { // Use internal data format conversion.
8800 stream_.userFormat = format;
8801 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8802 ss.format = PA_SAMPLE_FLOAT32LE;
8805 // Set other stream parameters.
8806 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8807 else stream_.userInterleaved = true;
8808 stream_.deviceInterleaved[mode] = true;
8809 stream_.nBuffers = 1;
8810 stream_.doByteSwap[mode] = false;
8811 stream_.nUserChannels[mode] = channels;
8812 stream_.nDeviceChannels[mode] = channels + firstChannel;
8813 stream_.channelOffset[mode] = 0;
8814 std::string streamName = "RtAudio";
8816 // Set flags for buffer conversion.
8817 stream_.doConvertBuffer[mode] = false;
8818 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8819 stream_.doConvertBuffer[mode] = true;
8820 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8821 stream_.doConvertBuffer[mode] = true;
8823 // Allocate necessary internal buffers.
8824 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8825 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8826 if ( stream_.userBuffer[mode] == NULL ) {
8827 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8830 stream_.bufferSize = *bufferSize;
8832 if ( stream_.doConvertBuffer[mode] ) {
8834 bool makeBuffer = true;
8835 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8836 if ( mode == INPUT ) {
8837 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8838 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8839 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8844 bufferBytes *= *bufferSize;
8845 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8846 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8847 if ( stream_.deviceBuffer == NULL ) {
8848 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8854 stream_.device[mode] = device;
8856 // Setup the buffer conversion information structure.
8857 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8859 if ( !stream_.apiHandle ) {
8860 PulseAudioHandle *pah = new PulseAudioHandle;
8862 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8866 stream_.apiHandle = pah;
8867 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8868 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8872 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8875 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8878 pa_buffer_attr buffer_attr;
8879 buffer_attr.fragsize = bufferBytes;
8880 buffer_attr.maxlength = -1;
8882 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8883 if ( !pah->s_rec ) {
8884 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8889 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8890 if ( !pah->s_play ) {
8891 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8899 if ( stream_.mode == UNINITIALIZED )
8900 stream_.mode = mode;
8901 else if ( stream_.mode == mode )
8904 stream_.mode = DUPLEX;
8906 if ( !stream_.callbackInfo.isRunning ) {
8907 stream_.callbackInfo.object = this;
8909 stream_.state = STREAM_STOPPED;
8910 // Set the thread attributes for joinable and realtime scheduling
8911 // priority (optional). The higher priority will only take affect
8912 // if the program is run as root or suid. Note, under Linux
8913 // processes with CAP_SYS_NICE privilege, a user can change
8914 // scheduling policy and priority (thus need not be root). See
8915 // POSIX "capabilities".
8916 pthread_attr_t attr;
8917 pthread_attr_init( &attr );
8918 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8919 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8920 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8921 stream_.callbackInfo.doRealtime = true;
8922 struct sched_param param;
8923 int priority = options->priority;
8924 int min = sched_get_priority_min( SCHED_RR );
8925 int max = sched_get_priority_max( SCHED_RR );
8926 if ( priority < min ) priority = min;
8927 else if ( priority > max ) priority = max;
8928 param.sched_priority = priority;
8930 // Set the policy BEFORE the priority. Otherwise it fails.
8931 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8932 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8933 // This is definitely required. Otherwise it fails.
8934 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8935 pthread_attr_setschedparam(&attr, ¶m);
8938 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8940 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8943 stream_.callbackInfo.isRunning = true;
8944 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8945 pthread_attr_destroy(&attr);
8947 // Failed. Try instead with default attributes.
8948 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8950 stream_.callbackInfo.isRunning = false;
8951 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8960 if ( pah && stream_.callbackInfo.isRunning ) {
8961 pthread_cond_destroy( &pah->runnable_cv );
8963 stream_.apiHandle = 0;
8966 for ( int i=0; i<2; i++ ) {
8967 if ( stream_.userBuffer[i] ) {
8968 free( stream_.userBuffer[i] );
8969 stream_.userBuffer[i] = 0;
8973 if ( stream_.deviceBuffer ) {
8974 free( stream_.deviceBuffer );
8975 stream_.deviceBuffer = 0;
8978 stream_.state = STREAM_CLOSED;
8982 //******************** End of __LINUX_PULSE__ *********************//
8985 #if defined(__LINUX_OSS__)
8988 #include <sys/ioctl.h>
8991 #include <sys/soundcard.h>
8995 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device file descriptors: [0] = playback, [1] = record
  bool xrun[2];            // over/underflow flags, set in callbackEvent(), reported to the user callback
  bool triggered;          // duplex trigger state (see SNDCTL_DSP_SETTRIGGER use in callbackEvent)
  pthread_cond_t runnable; // signaled to wake the callback thread when the stream (re)starts

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
9009 RtApiOss :: RtApiOss()
9011 // Nothing to do here.
9014 RtApiOss :: ~RtApiOss()
9016 if ( stream_.state != STREAM_CLOSED ) closeStream();
9019 unsigned int RtApiOss :: getDeviceCount( void )
9021 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9022 if ( mixerfd == -1 ) {
9023 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9024 error( RtAudioError::WARNING );
9028 oss_sysinfo sysinfo;
9029 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9031 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9032 error( RtAudioError::WARNING );
9037 return sysinfo.numaudios;
9040 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9042 RtAudio::DeviceInfo info;
9043 info.probed = false;
9045 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9046 if ( mixerfd == -1 ) {
9047 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9048 error( RtAudioError::WARNING );
9052 oss_sysinfo sysinfo;
9053 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9054 if ( result == -1 ) {
9056 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9057 error( RtAudioError::WARNING );
9061 unsigned nDevices = sysinfo.numaudios;
9062 if ( nDevices == 0 ) {
9064 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9065 error( RtAudioError::INVALID_USE );
9069 if ( device >= nDevices ) {
9071 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9072 error( RtAudioError::INVALID_USE );
9076 oss_audioinfo ainfo;
9078 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9080 if ( result == -1 ) {
9081 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9082 errorText_ = errorStream_.str();
9083 error( RtAudioError::WARNING );
9088 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9089 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9090 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9091 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9092 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
9095 // Probe data formats ... do for input
9096 unsigned long mask = ainfo.iformats;
9097 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9098 info.nativeFormats |= RTAUDIO_SINT16;
9099 if ( mask & AFMT_S8 )
9100 info.nativeFormats |= RTAUDIO_SINT8;
9101 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9102 info.nativeFormats |= RTAUDIO_SINT32;
9104 if ( mask & AFMT_FLOAT )
9105 info.nativeFormats |= RTAUDIO_FLOAT32;
9107 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9108 info.nativeFormats |= RTAUDIO_SINT24;
9110 // Check that we have at least one supported format
9111 if ( info.nativeFormats == 0 ) {
9112 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9113 errorText_ = errorStream_.str();
9114 error( RtAudioError::WARNING );
9118 // Probe the supported sample rates.
9119 info.sampleRates.clear();
9120 if ( ainfo.nrates ) {
9121 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9122 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9123 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9124 info.sampleRates.push_back( SAMPLE_RATES[k] );
9126 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9127 info.preferredSampleRate = SAMPLE_RATES[k];
9135 // Check min and max rate values;
9136 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9137 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9138 info.sampleRates.push_back( SAMPLE_RATES[k] );
9140 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9141 info.preferredSampleRate = SAMPLE_RATES[k];
9146 if ( info.sampleRates.size() == 0 ) {
9147 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9148 errorText_ = errorStream_.str();
9149 error( RtAudioError::WARNING );
9153 info.name = ainfo.name;
9160 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9161 unsigned int firstChannel, unsigned int sampleRate,
9162 RtAudioFormat format, unsigned int *bufferSize,
9163 RtAudio::StreamOptions *options )
9165 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9166 if ( mixerfd == -1 ) {
9167 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9171 oss_sysinfo sysinfo;
9172 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9173 if ( result == -1 ) {
9175 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9179 unsigned nDevices = sysinfo.numaudios;
9180 if ( nDevices == 0 ) {
9181 // This should not happen because a check is made before this function is called.
9183 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9187 if ( device >= nDevices ) {
9188 // This should not happen because a check is made before this function is called.
9190 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9194 oss_audioinfo ainfo;
9196 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9198 if ( result == -1 ) {
9199 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9200 errorText_ = errorStream_.str();
9204 // Check if device supports input or output
9205 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9206 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9207 if ( mode == OUTPUT )
9208 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9210 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9211 errorText_ = errorStream_.str();
9216 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9217 if ( mode == OUTPUT )
9219 else { // mode == INPUT
9220 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9221 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9222 close( handle->id[0] );
9224 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9225 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9226 errorText_ = errorStream_.str();
9229 // Check that the number previously set channels is the same.
9230 if ( stream_.nUserChannels[0] != channels ) {
9231 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9232 errorText_ = errorStream_.str();
9241 // Set exclusive access if specified.
9242 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9244 // Try to open the device.
9246 fd = open( ainfo.devnode, flags, 0 );
9248 if ( errno == EBUSY )
9249 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9251 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9252 errorText_ = errorStream_.str();
9256 // For duplex operation, specifically set this mode (this doesn't seem to work).
9258 if ( flags | O_RDWR ) {
9259 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9260 if ( result == -1) {
9261 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9262 errorText_ = errorStream_.str();
9268 // Check the device channel support.
9269 stream_.nUserChannels[mode] = channels;
9270 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9272 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9273 errorText_ = errorStream_.str();
9277 // Set the number of channels.
9278 int deviceChannels = channels + firstChannel;
9279 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9280 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9282 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9283 errorText_ = errorStream_.str();
9286 stream_.nDeviceChannels[mode] = deviceChannels;
9288 // Get the data format mask
9290 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9291 if ( result == -1 ) {
9293 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9294 errorText_ = errorStream_.str();
9298 // Determine how to set the device format.
9299 stream_.userFormat = format;
9300 int deviceFormat = -1;
9301 stream_.doByteSwap[mode] = false;
9302 if ( format == RTAUDIO_SINT8 ) {
9303 if ( mask & AFMT_S8 ) {
9304 deviceFormat = AFMT_S8;
9305 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9308 else if ( format == RTAUDIO_SINT16 ) {
9309 if ( mask & AFMT_S16_NE ) {
9310 deviceFormat = AFMT_S16_NE;
9311 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9313 else if ( mask & AFMT_S16_OE ) {
9314 deviceFormat = AFMT_S16_OE;
9315 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9316 stream_.doByteSwap[mode] = true;
9319 else if ( format == RTAUDIO_SINT24 ) {
9320 if ( mask & AFMT_S24_NE ) {
9321 deviceFormat = AFMT_S24_NE;
9322 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9324 else if ( mask & AFMT_S24_OE ) {
9325 deviceFormat = AFMT_S24_OE;
9326 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9327 stream_.doByteSwap[mode] = true;
9330 else if ( format == RTAUDIO_SINT32 ) {
9331 if ( mask & AFMT_S32_NE ) {
9332 deviceFormat = AFMT_S32_NE;
9333 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9335 else if ( mask & AFMT_S32_OE ) {
9336 deviceFormat = AFMT_S32_OE;
9337 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9338 stream_.doByteSwap[mode] = true;
9342 if ( deviceFormat == -1 ) {
9343 // The user requested format is not natively supported by the device.
9344 if ( mask & AFMT_S16_NE ) {
9345 deviceFormat = AFMT_S16_NE;
9346 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9348 else if ( mask & AFMT_S32_NE ) {
9349 deviceFormat = AFMT_S32_NE;
9350 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9352 else if ( mask & AFMT_S24_NE ) {
9353 deviceFormat = AFMT_S24_NE;
9354 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9356 else if ( mask & AFMT_S16_OE ) {
9357 deviceFormat = AFMT_S16_OE;
9358 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9359 stream_.doByteSwap[mode] = true;
9361 else if ( mask & AFMT_S32_OE ) {
9362 deviceFormat = AFMT_S32_OE;
9363 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9364 stream_.doByteSwap[mode] = true;
9366 else if ( mask & AFMT_S24_OE ) {
9367 deviceFormat = AFMT_S24_OE;
9368 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9369 stream_.doByteSwap[mode] = true;
9371 else if ( mask & AFMT_S8) {
9372 deviceFormat = AFMT_S8;
9373 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9377 if ( stream_.deviceFormat[mode] == 0 ) {
9378 // This really shouldn't happen ...
9380 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9381 errorText_ = errorStream_.str();
9385 // Set the data format.
9386 int temp = deviceFormat;
9387 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9388 if ( result == -1 || deviceFormat != temp ) {
9390 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9391 errorText_ = errorStream_.str();
9395 // Attempt to set the buffer size. According to OSS, the minimum
9396 // number of buffers is two. The supposed minimum buffer size is 16
9397 // bytes, so that will be our lower bound. The argument to this
9398 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9399 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9400 // We'll check the actual value used near the end of the setup
9402 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9403 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9405 if ( options ) buffers = options->numberOfBuffers;
9406 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9407 if ( buffers < 2 ) buffers = 3;
9408 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9409 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9410 if ( result == -1 ) {
9412 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9413 errorText_ = errorStream_.str();
9416 stream_.nBuffers = buffers;
9418 // Save buffer size (in sample frames).
9419 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9420 stream_.bufferSize = *bufferSize;
9422 // Set the sample rate.
9423 int srate = sampleRate;
9424 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9425 if ( result == -1 ) {
9427 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9428 errorText_ = errorStream_.str();
9432 // Verify the sample rate setup worked.
9433 if ( abs( srate - (int)sampleRate ) > 100 ) {
9435 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9436 errorText_ = errorStream_.str();
9439 stream_.sampleRate = sampleRate;
9441 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9442 // We're doing duplex setup here.
9443 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9444 stream_.nDeviceChannels[0] = deviceChannels;
9447 // Set interleaving parameters.
9448 stream_.userInterleaved = true;
9449 stream_.deviceInterleaved[mode] = true;
9450 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9451 stream_.userInterleaved = false;
9453 // Set flags for buffer conversion
9454 stream_.doConvertBuffer[mode] = false;
9455 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9456 stream_.doConvertBuffer[mode] = true;
9457 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9458 stream_.doConvertBuffer[mode] = true;
9459 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9460 stream_.nUserChannels[mode] > 1 )
9461 stream_.doConvertBuffer[mode] = true;
9463 // Allocate the stream handles if necessary and then save.
9464 if ( stream_.apiHandle == 0 ) {
9466 handle = new OssHandle;
9468 catch ( std::bad_alloc& ) {
9469 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9473 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9474 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9478 stream_.apiHandle = (void *) handle;
9481 handle = (OssHandle *) stream_.apiHandle;
9483 handle->id[mode] = fd;
9485 // Allocate necessary internal buffers.
9486 unsigned long bufferBytes;
9487 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9488 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9489 if ( stream_.userBuffer[mode] == NULL ) {
9490 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9494 if ( stream_.doConvertBuffer[mode] ) {
9496 bool makeBuffer = true;
9497 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9498 if ( mode == INPUT ) {
9499 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9500 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9501 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9506 bufferBytes *= *bufferSize;
9507 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9508 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9509 if ( stream_.deviceBuffer == NULL ) {
9510 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9516 stream_.device[mode] = device;
9517 stream_.state = STREAM_STOPPED;
9519 // Setup the buffer conversion information structure.
9520 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9522 // Setup thread if necessary.
9523 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9524 // We had already set up an output stream.
9525 stream_.mode = DUPLEX;
9526 if ( stream_.device[0] == device ) handle->id[0] = fd;
9529 stream_.mode = mode;
9531 // Setup callback thread.
9532 stream_.callbackInfo.object = (void *) this;
9534 // Set the thread attributes for joinable and realtime scheduling
9535 // priority. The higher priority will only take affect if the
9536 // program is run as root or suid.
9537 pthread_attr_t attr;
9538 pthread_attr_init( &attr );
9539 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9540 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9541 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9542 stream_.callbackInfo.doRealtime = true;
9543 struct sched_param param;
9544 int priority = options->priority;
9545 int min = sched_get_priority_min( SCHED_RR );
9546 int max = sched_get_priority_max( SCHED_RR );
9547 if ( priority < min ) priority = min;
9548 else if ( priority > max ) priority = max;
9549 param.sched_priority = priority;
9551 // Set the policy BEFORE the priority. Otherwise it fails.
9552 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9553 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9554 // This is definitely required. Otherwise it fails.
9555 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
9556 pthread_attr_setschedparam(&attr, ¶m);
9559 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9561 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9564 stream_.callbackInfo.isRunning = true;
9565 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9566 pthread_attr_destroy( &attr );
9568 // Failed. Try instead with default attributes.
9569 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9571 stream_.callbackInfo.isRunning = false;
9572 errorText_ = "RtApiOss::error creating callback thread!";
9582 pthread_cond_destroy( &handle->runnable );
9583 if ( handle->id[0] ) close( handle->id[0] );
9584 if ( handle->id[1] ) close( handle->id[1] );
9586 stream_.apiHandle = 0;
9589 for ( int i=0; i<2; i++ ) {
9590 if ( stream_.userBuffer[i] ) {
9591 free( stream_.userBuffer[i] );
9592 stream_.userBuffer[i] = 0;
9596 if ( stream_.deviceBuffer ) {
9597 free( stream_.deviceBuffer );
9598 stream_.deviceBuffer = 0;
9601 stream_.state = STREAM_CLOSED;
9605 void RtApiOss :: closeStream()
9607 if ( stream_.state == STREAM_CLOSED ) {
9608 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9609 error( RtAudioError::WARNING );
9613 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9614 stream_.callbackInfo.isRunning = false;
9615 MUTEX_LOCK( &stream_.mutex );
9616 if ( stream_.state == STREAM_STOPPED )
9617 pthread_cond_signal( &handle->runnable );
9618 MUTEX_UNLOCK( &stream_.mutex );
9619 pthread_join( stream_.callbackInfo.thread, NULL );
9621 if ( stream_.state == STREAM_RUNNING ) {
9622 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9623 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9625 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9626 stream_.state = STREAM_STOPPED;
9630 pthread_cond_destroy( &handle->runnable );
9631 if ( handle->id[0] ) close( handle->id[0] );
9632 if ( handle->id[1] ) close( handle->id[1] );
9634 stream_.apiHandle = 0;
9637 for ( int i=0; i<2; i++ ) {
9638 if ( stream_.userBuffer[i] ) {
9639 free( stream_.userBuffer[i] );
9640 stream_.userBuffer[i] = 0;
9644 if ( stream_.deviceBuffer ) {
9645 free( stream_.deviceBuffer );
9646 stream_.deviceBuffer = 0;
9649 stream_.mode = UNINITIALIZED;
9650 stream_.state = STREAM_CLOSED;
9653 void RtApiOss :: startStream()
9656 RtApi::startStream();
9657 if ( stream_.state == STREAM_RUNNING ) {
9658 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9659 error( RtAudioError::WARNING );
9663 MUTEX_LOCK( &stream_.mutex );
9665 #if defined( HAVE_GETTIMEOFDAY )
9666 gettimeofday( &stream_.lastTickTimestamp, NULL );
9669 stream_.state = STREAM_RUNNING;
9671 // No need to do anything else here ... OSS automatically starts
9672 // when fed samples.
9674 MUTEX_UNLOCK( &stream_.mutex );
9676 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9677 pthread_cond_signal( &handle->runnable );
9680 void RtApiOss :: stopStream()
9683 if ( stream_.state == STREAM_STOPPED ) {
9684 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9685 error( RtAudioError::WARNING );
9689 MUTEX_LOCK( &stream_.mutex );
9691 // The state might change while waiting on a mutex.
9692 if ( stream_.state == STREAM_STOPPED ) {
9693 MUTEX_UNLOCK( &stream_.mutex );
9698 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9699 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9701 // Flush the output with zeros a few times.
9704 RtAudioFormat format;
9706 if ( stream_.doConvertBuffer[0] ) {
9707 buffer = stream_.deviceBuffer;
9708 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9709 format = stream_.deviceFormat[0];
9712 buffer = stream_.userBuffer[0];
9713 samples = stream_.bufferSize * stream_.nUserChannels[0];
9714 format = stream_.userFormat;
9717 memset( buffer, 0, samples * formatBytes(format) );
9718 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9719 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9720 if ( result == -1 ) {
9721 errorText_ = "RtApiOss::stopStream: audio write error.";
9722 error( RtAudioError::WARNING );
9726 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9727 if ( result == -1 ) {
9728 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9729 errorText_ = errorStream_.str();
9732 handle->triggered = false;
9735 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9736 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9737 if ( result == -1 ) {
9738 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9739 errorText_ = errorStream_.str();
9745 stream_.state = STREAM_STOPPED;
9746 MUTEX_UNLOCK( &stream_.mutex );
9748 if ( result != -1 ) return;
9749 error( RtAudioError::SYSTEM_ERROR );
9752 void RtApiOss :: abortStream()
9755 if ( stream_.state == STREAM_STOPPED ) {
9756 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9757 error( RtAudioError::WARNING );
9761 MUTEX_LOCK( &stream_.mutex );
9763 // The state might change while waiting on a mutex.
9764 if ( stream_.state == STREAM_STOPPED ) {
9765 MUTEX_UNLOCK( &stream_.mutex );
9770 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9771 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9772 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9773 if ( result == -1 ) {
9774 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9775 errorText_ = errorStream_.str();
9778 handle->triggered = false;
9781 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9782 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9783 if ( result == -1 ) {
9784 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9785 errorText_ = errorStream_.str();
9791 stream_.state = STREAM_STOPPED;
9792 MUTEX_UNLOCK( &stream_.mutex );
9794 if ( result != -1 ) return;
9795 error( RtAudioError::SYSTEM_ERROR );
9798 void RtApiOss :: callbackEvent()
9800 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9801 if ( stream_.state == STREAM_STOPPED ) {
9802 MUTEX_LOCK( &stream_.mutex );
9803 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9804 if ( stream_.state != STREAM_RUNNING ) {
9805 MUTEX_UNLOCK( &stream_.mutex );
9808 MUTEX_UNLOCK( &stream_.mutex );
9811 if ( stream_.state == STREAM_CLOSED ) {
9812 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9813 error( RtAudioError::WARNING );
9817 // Invoke user callback to get fresh output data.
9818 int doStopStream = 0;
9819 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9820 double streamTime = getStreamTime();
9821 RtAudioStreamStatus status = 0;
9822 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9823 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9824 handle->xrun[0] = false;
9826 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9827 status |= RTAUDIO_INPUT_OVERFLOW;
9828 handle->xrun[1] = false;
9830 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9831 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9832 if ( doStopStream == 2 ) {
9833 this->abortStream();
9837 MUTEX_LOCK( &stream_.mutex );
9839 // The state might change while waiting on a mutex.
9840 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9845 RtAudioFormat format;
9847 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9849 // Setup parameters and do buffer conversion if necessary.
9850 if ( stream_.doConvertBuffer[0] ) {
9851 buffer = stream_.deviceBuffer;
9852 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9853 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9854 format = stream_.deviceFormat[0];
9857 buffer = stream_.userBuffer[0];
9858 samples = stream_.bufferSize * stream_.nUserChannels[0];
9859 format = stream_.userFormat;
9862 // Do byte swapping if necessary.
9863 if ( stream_.doByteSwap[0] )
9864 byteSwapBuffer( buffer, samples, format );
9866 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9868 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9869 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9870 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9871 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9872 handle->triggered = true;
9875 // Write samples to device.
9876 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9878 if ( result == -1 ) {
9879 // We'll assume this is an underrun, though there isn't a
9880 // specific means for determining that.
9881 handle->xrun[0] = true;
9882 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9883 error( RtAudioError::WARNING );
9884 // Continue on to input section.
9888 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9890 // Setup parameters.
9891 if ( stream_.doConvertBuffer[1] ) {
9892 buffer = stream_.deviceBuffer;
9893 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9894 format = stream_.deviceFormat[1];
9897 buffer = stream_.userBuffer[1];
9898 samples = stream_.bufferSize * stream_.nUserChannels[1];
9899 format = stream_.userFormat;
9902 // Read samples from device.
9903 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9905 if ( result == -1 ) {
9906 // We'll assume this is an overrun, though there isn't a
9907 // specific means for determining that.
9908 handle->xrun[1] = true;
9909 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9910 error( RtAudioError::WARNING );
9914 // Do byte swapping if necessary.
9915 if ( stream_.doByteSwap[1] )
9916 byteSwapBuffer( buffer, samples, format );
9918 // Do buffer conversion if necessary.
9919 if ( stream_.doConvertBuffer[1] )
9920 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9924 MUTEX_UNLOCK( &stream_.mutex );
9926 RtApi::tickStreamTime();
9927 if ( doStopStream == 1 ) this->stopStream();
9930 static void *ossCallbackHandler( void *ptr )
9932 CallbackInfo *info = (CallbackInfo *) ptr;
9933 RtApiOss *object = (RtApiOss *) info->object;
9934 bool *isRunning = &info->isRunning;
9936 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9937 if (info->doRealtime) {
9938 std::cerr << "RtAudio oss: " <<
9939 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9940 "running realtime scheduling" << std::endl;
9944 while ( *isRunning == true ) {
9945 pthread_testcancel();
9946 object->callbackEvent();
9949 pthread_exit( NULL );
9952 //******************** End of __LINUX_OSS__ *********************//
9956 // *************************************************** //
9958 // Protected common (OS-independent) RtAudio methods.
9960 // *************************************************** //
9962 // This method can be modified to control the behavior of error
9963 // message printing.
9964 void RtApi :: error( RtAudioError::Type type )
9966 errorStream_.str(""); // clear the ostringstream
9968 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9969 if ( errorCallback ) {
9970 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9972 if ( firstErrorOccurred_ )
9975 firstErrorOccurred_ = true;
9976 const std::string errorMessage = errorText_;
9978 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9979 stream_.callbackInfo.isRunning = false; // exit from the thread
9983 errorCallback( type, errorMessage );
9984 firstErrorOccurred_ = false;
9988 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9989 std::cerr << '\n' << errorText_ << "\n\n";
9990 else if ( type != RtAudioError::WARNING )
9991 throw( RtAudioError( errorText_, type ) );
9994 void RtApi :: verifyStream()
9996 if ( stream_.state == STREAM_CLOSED ) {
9997 errorText_ = "RtApi:: a stream is not open!";
9998 error( RtAudioError::INVALID_USE );
10002 void RtApi :: clearStreamInfo()
10004 stream_.mode = UNINITIALIZED;
10005 stream_.state = STREAM_CLOSED;
10006 stream_.sampleRate = 0;
10007 stream_.bufferSize = 0;
10008 stream_.nBuffers = 0;
10009 stream_.userFormat = 0;
10010 stream_.userInterleaved = true;
10011 stream_.streamTime = 0.0;
10012 stream_.apiHandle = 0;
10013 stream_.deviceBuffer = 0;
10014 stream_.callbackInfo.callback = 0;
10015 stream_.callbackInfo.userData = 0;
10016 stream_.callbackInfo.isRunning = false;
10017 stream_.callbackInfo.errorCallback = 0;
10018 for ( int i=0; i<2; i++ ) {
10019 stream_.device[i] = 11111;
10020 stream_.doConvertBuffer[i] = false;
10021 stream_.deviceInterleaved[i] = true;
10022 stream_.doByteSwap[i] = false;
10023 stream_.nUserChannels[i] = 0;
10024 stream_.nDeviceChannels[i] = 0;
10025 stream_.channelOffset[i] = 0;
10026 stream_.deviceFormat[i] = 0;
10027 stream_.latency[i] = 0;
10028 stream_.userBuffer[i] = 0;
10029 stream_.convertInfo[i].channels = 0;
10030 stream_.convertInfo[i].inJump = 0;
10031 stream_.convertInfo[i].outJump = 0;
10032 stream_.convertInfo[i].inFormat = 0;
10033 stream_.convertInfo[i].outFormat = 0;
10034 stream_.convertInfo[i].inOffset.clear();
10035 stream_.convertInfo[i].outOffset.clear();
10039 unsigned int RtApi :: formatBytes( RtAudioFormat format )
10041 if ( format == RTAUDIO_SINT16 )
10043 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10045 else if ( format == RTAUDIO_FLOAT64 )
10047 else if ( format == RTAUDIO_SINT24 )
10049 else if ( format == RTAUDIO_SINT8 )
10052 errorText_ = "RtApi::formatBytes: undefined format.";
10053 error( RtAudioError::WARNING );
10058 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
10060 if ( mode == INPUT ) { // convert device to user buffer
10061 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10062 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10063 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10064 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10066 else { // convert user to device buffer
10067 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10068 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10069 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10070 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
10073 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10074 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10076 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10078 // Set up the interleave/deinterleave offsets.
10079 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10080 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10081 ( mode == INPUT && stream_.userInterleaved ) ) {
10082 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10083 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10084 stream_.convertInfo[mode].outOffset.push_back( k );
10085 stream_.convertInfo[mode].inJump = 1;
10089 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10090 stream_.convertInfo[mode].inOffset.push_back( k );
10091 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10092 stream_.convertInfo[mode].outJump = 1;
10096 else { // no (de)interleaving
10097 if ( stream_.userInterleaved ) {
10098 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10099 stream_.convertInfo[mode].inOffset.push_back( k );
10100 stream_.convertInfo[mode].outOffset.push_back( k );
10104 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10105 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10106 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10107 stream_.convertInfo[mode].inJump = 1;
10108 stream_.convertInfo[mode].outJump = 1;
10113 // Add channel offset.
10114 if ( firstChannel > 0 ) {
10115 if ( stream_.deviceInterleaved[mode] ) {
10116 if ( mode == OUTPUT ) {
10117 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10118 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10121 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10122 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10126 if ( mode == OUTPUT ) {
10127 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10128 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10131 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10132 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10138 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10140 // This function does format conversion, input/output channel compensation, and
10141 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10142 // the lower three bytes of a 32-bit integer.
10144 // Clear our device buffer when in/out duplex device channels are different
10145 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10146 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10147 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
10150 if (info.outFormat == RTAUDIO_FLOAT64) {
10152 Float64 *out = (Float64 *)outBuffer;
10154 if (info.inFormat == RTAUDIO_SINT8) {
10155 signed char *in = (signed char *)inBuffer;
10156 scale = 1.0 / 127.5;
10157 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10158 for (j=0; j<info.channels; j++) {
10159 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10160 out[info.outOffset[j]] += 0.5;
10161 out[info.outOffset[j]] *= scale;
10164 out += info.outJump;
10167 else if (info.inFormat == RTAUDIO_SINT16) {
10168 Int16 *in = (Int16 *)inBuffer;
10169 scale = 1.0 / 32767.5;
10170 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10171 for (j=0; j<info.channels; j++) {
10172 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10173 out[info.outOffset[j]] += 0.5;
10174 out[info.outOffset[j]] *= scale;
10177 out += info.outJump;
10180 else if (info.inFormat == RTAUDIO_SINT24) {
10181 Int24 *in = (Int24 *)inBuffer;
10182 scale = 1.0 / 8388607.5;
10183 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10184 for (j=0; j<info.channels; j++) {
10185 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10186 out[info.outOffset[j]] += 0.5;
10187 out[info.outOffset[j]] *= scale;
10190 out += info.outJump;
10193 else if (info.inFormat == RTAUDIO_SINT32) {
10194 Int32 *in = (Int32 *)inBuffer;
10195 scale = 1.0 / 2147483647.5;
10196 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10197 for (j=0; j<info.channels; j++) {
10198 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10199 out[info.outOffset[j]] += 0.5;
10200 out[info.outOffset[j]] *= scale;
10203 out += info.outJump;
10206 else if (info.inFormat == RTAUDIO_FLOAT32) {
10207 Float32 *in = (Float32 *)inBuffer;
10208 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10209 for (j=0; j<info.channels; j++) {
10210 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10213 out += info.outJump;
10216 else if (info.inFormat == RTAUDIO_FLOAT64) {
10217 // Channel compensation and/or (de)interleaving only.
10218 Float64 *in = (Float64 *)inBuffer;
10219 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10220 for (j=0; j<info.channels; j++) {
10221 out[info.outOffset[j]] = in[info.inOffset[j]];
10224 out += info.outJump;
10228 else if (info.outFormat == RTAUDIO_FLOAT32) {
10230 Float32 *out = (Float32 *)outBuffer;
10232 if (info.inFormat == RTAUDIO_SINT8) {
10233 signed char *in = (signed char *)inBuffer;
10234 scale = (Float32) ( 1.0 / 127.5 );
10235 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10236 for (j=0; j<info.channels; j++) {
10237 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10238 out[info.outOffset[j]] += 0.5;
10239 out[info.outOffset[j]] *= scale;
10242 out += info.outJump;
10245 else if (info.inFormat == RTAUDIO_SINT16) {
10246 Int16 *in = (Int16 *)inBuffer;
10247 scale = (Float32) ( 1.0 / 32767.5 );
10248 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10249 for (j=0; j<info.channels; j++) {
10250 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10251 out[info.outOffset[j]] += 0.5;
10252 out[info.outOffset[j]] *= scale;
10255 out += info.outJump;
10258 else if (info.inFormat == RTAUDIO_SINT24) {
10259 Int24 *in = (Int24 *)inBuffer;
10260 scale = (Float32) ( 1.0 / 8388607.5 );
10261 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10262 for (j=0; j<info.channels; j++) {
10263 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10264 out[info.outOffset[j]] += 0.5;
10265 out[info.outOffset[j]] *= scale;
10268 out += info.outJump;
10271 else if (info.inFormat == RTAUDIO_SINT32) {
10272 Int32 *in = (Int32 *)inBuffer;
10273 scale = (Float32) ( 1.0 / 2147483647.5 );
10274 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10275 for (j=0; j<info.channels; j++) {
10276 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10277 out[info.outOffset[j]] += 0.5;
10278 out[info.outOffset[j]] *= scale;
10281 out += info.outJump;
10284 else if (info.inFormat == RTAUDIO_FLOAT32) {
10285 // Channel compensation and/or (de)interleaving only.
10286 Float32 *in = (Float32 *)inBuffer;
10287 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10288 for (j=0; j<info.channels; j++) {
10289 out[info.outOffset[j]] = in[info.inOffset[j]];
10292 out += info.outJump;
10295 else if (info.inFormat == RTAUDIO_FLOAT64) {
10296 Float64 *in = (Float64 *)inBuffer;
10297 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10298 for (j=0; j<info.channels; j++) {
10299 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10302 out += info.outJump;
10306 else if (info.outFormat == RTAUDIO_SINT32) {
10307 Int32 *out = (Int32 *)outBuffer;
10308 if (info.inFormat == RTAUDIO_SINT8) {
10309 signed char *in = (signed char *)inBuffer;
10310 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10311 for (j=0; j<info.channels; j++) {
10312 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10313 out[info.outOffset[j]] <<= 24;
10316 out += info.outJump;
10319 else if (info.inFormat == RTAUDIO_SINT16) {
10320 Int16 *in = (Int16 *)inBuffer;
10321 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10322 for (j=0; j<info.channels; j++) {
10323 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10324 out[info.outOffset[j]] <<= 16;
10327 out += info.outJump;
10330 else if (info.inFormat == RTAUDIO_SINT24) {
10331 Int24 *in = (Int24 *)inBuffer;
10332 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10333 for (j=0; j<info.channels; j++) {
10334 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10335 out[info.outOffset[j]] <<= 8;
10338 out += info.outJump;
10341 else if (info.inFormat == RTAUDIO_SINT32) {
10342 // Channel compensation and/or (de)interleaving only.
10343 Int32 *in = (Int32 *)inBuffer;
10344 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10345 for (j=0; j<info.channels; j++) {
10346 out[info.outOffset[j]] = in[info.inOffset[j]];
10349 out += info.outJump;
10352 else if (info.inFormat == RTAUDIO_FLOAT32) {
10353 Float32 *in = (Float32 *)inBuffer;
10354 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10355 for (j=0; j<info.channels; j++) {
10356 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10359 out += info.outJump;
10362 else if (info.inFormat == RTAUDIO_FLOAT64) {
10363 Float64 *in = (Float64 *)inBuffer;
10364 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10365 for (j=0; j<info.channels; j++) {
10366 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10369 out += info.outJump;
10373 else if (info.outFormat == RTAUDIO_SINT24) {
10374 Int24 *out = (Int24 *)outBuffer;
10375 if (info.inFormat == RTAUDIO_SINT8) {
10376 signed char *in = (signed char *)inBuffer;
10377 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10378 for (j=0; j<info.channels; j++) {
10379 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10380 //out[info.outOffset[j]] <<= 16;
10383 out += info.outJump;
10386 else if (info.inFormat == RTAUDIO_SINT16) {
10387 Int16 *in = (Int16 *)inBuffer;
10388 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10389 for (j=0; j<info.channels; j++) {
10390 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10391 //out[info.outOffset[j]] <<= 8;
10394 out += info.outJump;
10397 else if (info.inFormat == RTAUDIO_SINT24) {
10398 // Channel compensation and/or (de)interleaving only.
10399 Int24 *in = (Int24 *)inBuffer;
10400 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10401 for (j=0; j<info.channels; j++) {
10402 out[info.outOffset[j]] = in[info.inOffset[j]];
10405 out += info.outJump;
10408 else if (info.inFormat == RTAUDIO_SINT32) {
10409 Int32 *in = (Int32 *)inBuffer;
10410 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10411 for (j=0; j<info.channels; j++) {
10412 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10413 //out[info.outOffset[j]] >>= 8;
10416 out += info.outJump;
10419 else if (info.inFormat == RTAUDIO_FLOAT32) {
10420 Float32 *in = (Float32 *)inBuffer;
10421 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10422 for (j=0; j<info.channels; j++) {
10423 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10426 out += info.outJump;
10429 else if (info.inFormat == RTAUDIO_FLOAT64) {
10430 Float64 *in = (Float64 *)inBuffer;
10431 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10432 for (j=0; j<info.channels; j++) {
10433 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10436 out += info.outJump;
10440 else if (info.outFormat == RTAUDIO_SINT16) {
10441 Int16 *out = (Int16 *)outBuffer;
10442 if (info.inFormat == RTAUDIO_SINT8) {
10443 signed char *in = (signed char *)inBuffer;
10444 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10445 for (j=0; j<info.channels; j++) {
10446 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10447 out[info.outOffset[j]] <<= 8;
10450 out += info.outJump;
10453 else if (info.inFormat == RTAUDIO_SINT16) {
10454 // Channel compensation and/or (de)interleaving only.
10455 Int16 *in = (Int16 *)inBuffer;
10456 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10457 for (j=0; j<info.channels; j++) {
10458 out[info.outOffset[j]] = in[info.inOffset[j]];
10461 out += info.outJump;
10464 else if (info.inFormat == RTAUDIO_SINT24) {
10465 Int24 *in = (Int24 *)inBuffer;
10466 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10467 for (j=0; j<info.channels; j++) {
10468 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10471 out += info.outJump;
10474 else if (info.inFormat == RTAUDIO_SINT32) {
10475 Int32 *in = (Int32 *)inBuffer;
10476 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10477 for (j=0; j<info.channels; j++) {
10478 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10481 out += info.outJump;
10484 else if (info.inFormat == RTAUDIO_FLOAT32) {
10485 Float32 *in = (Float32 *)inBuffer;
10486 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10487 for (j=0; j<info.channels; j++) {
10488 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10491 out += info.outJump;
10494 else if (info.inFormat == RTAUDIO_FLOAT64) {
10495 Float64 *in = (Float64 *)inBuffer;
10496 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10497 for (j=0; j<info.channels; j++) {
10498 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10501 out += info.outJump;
10505 else if (info.outFormat == RTAUDIO_SINT8) {
10506 signed char *out = (signed char *)outBuffer;
10507 if (info.inFormat == RTAUDIO_SINT8) {
10508 // Channel compensation and/or (de)interleaving only.
10509 signed char *in = (signed char *)inBuffer;
10510 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10511 for (j=0; j<info.channels; j++) {
10512 out[info.outOffset[j]] = in[info.inOffset[j]];
10515 out += info.outJump;
10518 if (info.inFormat == RTAUDIO_SINT16) {
10519 Int16 *in = (Int16 *)inBuffer;
10520 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10521 for (j=0; j<info.channels; j++) {
10522 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10525 out += info.outJump;
10528 else if (info.inFormat == RTAUDIO_SINT24) {
10529 Int24 *in = (Int24 *)inBuffer;
10530 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10531 for (j=0; j<info.channels; j++) {
10532 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10535 out += info.outJump;
10538 else if (info.inFormat == RTAUDIO_SINT32) {
10539 Int32 *in = (Int32 *)inBuffer;
10540 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10541 for (j=0; j<info.channels; j++) {
10542 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10545 out += info.outJump;
10548 else if (info.inFormat == RTAUDIO_FLOAT32) {
10549 Float32 *in = (Float32 *)inBuffer;
10550 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10551 for (j=0; j<info.channels; j++) {
10552 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10555 out += info.outJump;
10558 else if (info.inFormat == RTAUDIO_FLOAT64) {
10559 Float64 *in = (Float64 *)inBuffer;
10560 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10561 for (j=0; j<info.channels; j++) {
10562 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10565 out += info.outJump;
10571 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10572 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10573 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10575 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10581 if ( format == RTAUDIO_SINT16 ) {
10582 for ( unsigned int i=0; i<samples; i++ ) {
10583 // Swap 1st and 2nd bytes.
10588 // Increment 2 bytes.
10592 else if ( format == RTAUDIO_SINT32 ||
10593 format == RTAUDIO_FLOAT32 ) {
10594 for ( unsigned int i=0; i<samples; i++ ) {
10595 // Swap 1st and 4th bytes.
10600 // Swap 2nd and 3rd bytes.
10606 // Increment 3 more bytes.
10610 else if ( format == RTAUDIO_SINT24 ) {
10611 for ( unsigned int i=0; i<samples; i++ ) {
10612 // Swap 1st and 3rd bytes.
10617 // Increment 2 more bytes.
10621 else if ( format == RTAUDIO_FLOAT64 ) {
10622 for ( unsigned int i=0; i<samples; i++ ) {
10623 // Swap 1st and 8th bytes
10628 // Swap 2nd and 7th bytes
10634 // Swap 3rd and 6th bytes
10640 // Swap 4th and 5th bytes
10646 // Increment 5 more bytes.
10652 // Indentation settings for Vim and Emacs
10654 // Local Variables:
10655 // c-basic-offset: 2
10656 // indent-tabs-mode: nil
10659 // vim: et sts=2 sw=2