1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Per-platform mutex wrappers for the stream mutex.  On Windows builds
// (DirectSound, ASIO, WASAPI) a Win32 CRITICAL_SECTION is used.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a narrow C string to std::string (identity copy).
// Guards against a NULL pointer, which would be undefined behavior
// when passed to the std::string constructor.
static std::string convertCharPointerToStdString(const char *text)
{
  if ( text == NULL ) return std::string();
  return std::string(text);
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// POSIX builds use pthread mutexes.
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// NOTE(review): the two definitions below belong to the fallback (#else) arm
// for builds with no audio API compiled in; the intervening preprocessor
// lines are not visible in this view of the file.
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Define API names and display names.
// Must be in same order as API enum.
// (short machine-readable name, human-readable display name)
const char* rtaudio_api_names[][2] = {
  { "unspecified" , "Unknown" },
  { "alsa" , "ALSA" },
  { "pulse" , "Pulse" },
  { "oss" , "OpenSoundSystem" },
  { "jack" , "Jack" },
  { "core" , "CoreAudio" },
  { "wasapi" , "WASAPI" },
  { "asio" , "ASIO" },
  { "ds" , "DirectSound" },
  { "dummy" , "Dummy" },
};

// Number of entries above; checked against RtAudio::NUM_APIS below.
const unsigned int rtaudio_num_api_names =
  sizeof(rtaudio_api_names)/sizeof(rtaudio_api_names[0]);
119 // The order here will control the order of RtAudio's API search in
121 extern "C" const RtAudio::Api rtaudio_compiled_apis[] = {
122 #if defined(__UNIX_JACK__)
125 #if defined(__LINUX_PULSE__)
126 RtAudio::LINUX_PULSE,
128 #if defined(__LINUX_ALSA__)
131 #if defined(__LINUX_OSS__)
134 #if defined(__WINDOWS_ASIO__)
135 RtAudio::WINDOWS_ASIO,
137 #if defined(__WINDOWS_WASAPI__)
138 RtAudio::WINDOWS_WASAPI,
140 #if defined(__WINDOWS_DS__)
143 #if defined(__MACOSX_CORE__)
144 RtAudio::MACOSX_CORE,
146 #if defined(__RTAUDIO_DUMMY__)
147 RtAudio::RTAUDIO_DUMMY,
149 RtAudio::UNSPECIFIED,
151 extern "C" const unsigned int rtaudio_num_compiled_apis =
152 sizeof(rtaudio_compiled_apis)/sizeof(rtaudio_compiled_apis[0])-1;
155 // This is a compile-time check that rtaudio_num_api_names == RtAudio::NUM_APIS.
156 // If the build breaks here, check that they match.
157 template<bool b> class StaticAssert { private: StaticAssert() {} };
158 template<> class StaticAssert<true>{ public: StaticAssert() {} };
159 class StaticAssertions { StaticAssertions() {
160 StaticAssert<rtaudio_num_api_names == RtAudio::NUM_APIS>();
163 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
165 apis = std::vector<RtAudio::Api>(rtaudio_compiled_apis,
166 rtaudio_compiled_apis + rtaudio_num_compiled_apis);
169 std::string RtAudio :: getApiName( RtAudio::Api api )
171 if (api < 0 || api >= RtAudio::NUM_APIS)
173 return rtaudio_api_names[api][0];
176 std::string RtAudio :: getApiDisplayName( RtAudio::Api api )
178 if (api < 0 || api >= RtAudio::NUM_APIS)
180 return rtaudio_api_names[api][1];
183 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
186 for (i = 0; i < rtaudio_num_compiled_apis; ++i)
187 if (name == rtaudio_api_names[rtaudio_compiled_apis[i]][0])
188 return rtaudio_compiled_apis[i];
189 return RtAudio::UNSPECIFIED;
192 void RtAudio :: openRtApi( RtAudio::Api api )
198 #if defined(__UNIX_JACK__)
199 if ( api == UNIX_JACK )
200 rtapi_ = new RtApiJack();
202 #if defined(__LINUX_ALSA__)
203 if ( api == LINUX_ALSA )
204 rtapi_ = new RtApiAlsa();
206 #if defined(__LINUX_PULSE__)
207 if ( api == LINUX_PULSE )
208 rtapi_ = new RtApiPulse();
210 #if defined(__LINUX_OSS__)
211 if ( api == LINUX_OSS )
212 rtapi_ = new RtApiOss();
214 #if defined(__WINDOWS_ASIO__)
215 if ( api == WINDOWS_ASIO )
216 rtapi_ = new RtApiAsio();
218 #if defined(__WINDOWS_WASAPI__)
219 if ( api == WINDOWS_WASAPI )
220 rtapi_ = new RtApiWasapi();
222 #if defined(__WINDOWS_DS__)
223 if ( api == WINDOWS_DS )
224 rtapi_ = new RtApiDs();
226 #if defined(__MACOSX_CORE__)
227 if ( api == MACOSX_CORE )
228 rtapi_ = new RtApiCore();
230 #if defined(__RTAUDIO_DUMMY__)
231 if ( api == RTAUDIO_DUMMY )
232 rtapi_ = new RtApiDummy();
236 RtAudio :: RtAudio( RtAudio::Api api )
240 if ( api != UNSPECIFIED ) {
241 // Attempt to open the specified API.
243 if ( rtapi_ ) return;
245 // No compiled support for specified API value. Issue a debug
246 // warning and continue as if no API was specified.
247 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
250 // Iterate through the compiled APIs and return as soon as we find
251 // one with at least one device or we reach the end of the list.
252 std::vector< RtAudio::Api > apis;
253 getCompiledApi( apis );
254 for ( unsigned int i=0; i<apis.size(); i++ ) {
255 openRtApi( apis[i] );
256 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
259 if ( rtapi_ ) return;
261 // It should not be possible to get here because the preprocessor
262 // definition __RTAUDIO_DUMMY__ is automatically defined if no
263 // API-specific definitions are passed to the compiler. But just in
264 // case something weird happens, we'll thow an error.
265 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
266 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
269 RtAudio :: ~RtAudio()
275 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
276 RtAudio::StreamParameters *inputParameters,
277 RtAudioFormat format, unsigned int sampleRate,
278 unsigned int *bufferFrames,
279 RtAudioCallback callback, void *userData,
280 RtAudio::StreamOptions *options,
281 RtAudioErrorCallback errorCallback )
283 return rtapi_->openStream( outputParameters, inputParameters, format,
284 sampleRate, bufferFrames, callback,
285 userData, options, errorCallback );
288 // *************************************************** //
290 // Public RtApi definitions (see end of file for
291 // private or protected utility functions).
293 // *************************************************** //
297 stream_.state = STREAM_CLOSED;
298 stream_.mode = UNINITIALIZED;
299 stream_.apiHandle = 0;
300 stream_.userBuffer[0] = 0;
301 stream_.userBuffer[1] = 0;
302 MUTEX_INITIALIZE( &stream_.mutex );
303 showWarnings_ = true;
304 firstErrorOccurred_ = false;
309 MUTEX_DESTROY( &stream_.mutex );
312 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
313 RtAudio::StreamParameters *iParams,
314 RtAudioFormat format, unsigned int sampleRate,
315 unsigned int *bufferFrames,
316 RtAudioCallback callback, void *userData,
317 RtAudio::StreamOptions *options,
318 RtAudioErrorCallback errorCallback )
320 if ( stream_.state != STREAM_CLOSED ) {
321 errorText_ = "RtApi::openStream: a stream is already open!";
322 error( RtAudioError::INVALID_USE );
326 // Clear stream information potentially left from a previously open stream.
329 if ( oParams && oParams->nChannels < 1 ) {
330 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
331 error( RtAudioError::INVALID_USE );
335 if ( iParams && iParams->nChannels < 1 ) {
336 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
337 error( RtAudioError::INVALID_USE );
341 if ( oParams == NULL && iParams == NULL ) {
342 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
343 error( RtAudioError::INVALID_USE );
347 if ( formatBytes(format) == 0 ) {
348 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
349 error( RtAudioError::INVALID_USE );
353 unsigned int nDevices = getDeviceCount();
354 unsigned int oChannels = 0;
356 oChannels = oParams->nChannels;
357 if ( oParams->deviceId >= nDevices ) {
358 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
359 error( RtAudioError::INVALID_USE );
364 unsigned int iChannels = 0;
366 iChannels = iParams->nChannels;
367 if ( iParams->deviceId >= nDevices ) {
368 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
369 error( RtAudioError::INVALID_USE );
376 if ( oChannels > 0 ) {
378 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
379 sampleRate, format, bufferFrames, options );
380 if ( result == false ) {
381 error( RtAudioError::SYSTEM_ERROR );
386 if ( iChannels > 0 ) {
388 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
389 sampleRate, format, bufferFrames, options );
390 if ( result == false ) {
391 if ( oChannels > 0 ) closeStream();
392 error( RtAudioError::SYSTEM_ERROR );
397 stream_.callbackInfo.callback = (void *) callback;
398 stream_.callbackInfo.userData = userData;
399 stream_.callbackInfo.errorCallback = (void *) errorCallback;
401 if ( options ) options->numberOfBuffers = stream_.nBuffers;
402 stream_.state = STREAM_STOPPED;
405 unsigned int RtApi :: getDefaultInputDevice( void )
407 // Should be implemented in subclasses if possible.
411 unsigned int RtApi :: getDefaultOutputDevice( void )
413 // Should be implemented in subclasses if possible.
417 void RtApi :: closeStream( void )
419 // MUST be implemented in subclasses!
423 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
424 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
425 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
426 RtAudio::StreamOptions * /*options*/ )
428 // MUST be implemented in subclasses!
432 void RtApi :: tickStreamTime( void )
434 // Subclasses that do not provide their own implementation of
435 // getStreamTime should call this function once per buffer I/O to
436 // provide basic stream time support.
438 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
440 #if defined( HAVE_GETTIMEOFDAY )
441 gettimeofday( &stream_.lastTickTimestamp, NULL );
445 long RtApi :: getStreamLatency( void )
449 long totalLatency = 0;
450 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
451 totalLatency = stream_.latency[0];
452 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
453 totalLatency += stream_.latency[1];
458 double RtApi :: getStreamTime( void )
462 #if defined( HAVE_GETTIMEOFDAY )
463 // Return a very accurate estimate of the stream time by
464 // adding in the elapsed time since the last tick.
468 if ( stream_.state != STREAM_RUNNING || (stream_.lastTickTimestamp.tv_sec == 0 && stream_.lastTickTimestamp.tv_usec == 0) )
469 return stream_.streamTime;
471 gettimeofday( &now, NULL );
472 then = stream_.lastTickTimestamp;
473 return stream_.streamTime +
474 ((now.tv_sec + 0.000001 * now.tv_usec) -
475 (then.tv_sec + 0.000001 * then.tv_usec));
477 return stream_.streamTime;
481 void RtApi :: setStreamTime( double time )
486 stream_.streamTime = time;
487 #if defined( HAVE_GETTIMEOFDAY )
488 gettimeofday( &stream_.lastTickTimestamp, NULL );
492 unsigned int RtApi :: getStreamSampleRate( void )
496 return stream_.sampleRate;
499 void RtApi :: startStream( void )
501 #if defined( HAVE_GETTIMEOFDAY )
502 stream_.lastTickTimestamp.tv_sec = 0;
503 stream_.lastTickTimestamp.tv_usec = 0;
508 // *************************************************** //
510 // OS/API-specific methods.
512 // *************************************************** //
514 #if defined(__MACOSX_CORE__)
516 // The OS X CoreAudio API is designed to use a separate callback
517 // procedure for each of its audio devices. A single RtAudio duplex
518 // stream using two different devices is supported here, though it
519 // cannot be guaranteed to always behave correctly because we cannot
520 // synchronize these two callbacks.
522 // A property listener is installed for over/underrun information.
523 // However, no functionality is currently provided to allow property
524 // listeners to trigger user handlers because it is unclear what could
525 // be done if a critical stream parameter (buffer size, sample rate,
526 // device disconnect) notification arrived. The listeners entail
527 // quite a bit of extra code and most likely, a user program wouldn't
528 // be prepared for the result anyway. However, we do provide a flag
529 // to the client callback function to inform of an over/underrun.
531 // A structure to hold various information related to the CoreAudio API
534 AudioDeviceID id[2]; // device ids
535 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
536 AudioDeviceIOProcID procId[2];
538 UInt32 iStream[2]; // device stream index (or first if using multiple)
539 UInt32 nStreams[2]; // number of streams to use
542 pthread_cond_t condition;
543 int drainCounter; // Tracks callback counts when draining
544 bool internalDrain; // Indicates if stop is initiated from callback or not.
547 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
550 RtApiCore:: RtApiCore()
552 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
553 // This is a largely undocumented but absolutely necessary
554 // requirement starting with OS-X 10.6. If not called, queries and
555 // updates to various audio device properties are not handled
557 CFRunLoopRef theRunLoop = NULL;
558 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
559 kAudioObjectPropertyScopeGlobal,
560 kAudioObjectPropertyElementMaster };
561 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
562 if ( result != noErr ) {
563 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
564 error( RtAudioError::WARNING );
569 RtApiCore :: ~RtApiCore()
571 // The subclass destructor gets called before the base class
572 // destructor, so close an existing stream before deallocating
573 // apiDeviceId memory.
574 if ( stream_.state != STREAM_CLOSED ) closeStream();
577 unsigned int RtApiCore :: getDeviceCount( void )
579 // Find out how many audio devices there are, if any.
581 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
582 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
583 if ( result != noErr ) {
584 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
585 error( RtAudioError::WARNING );
589 return dataSize / sizeof( AudioDeviceID );
592 unsigned int RtApiCore :: getDefaultInputDevice( void )
594 unsigned int nDevices = getDeviceCount();
595 if ( nDevices <= 1 ) return 0;
598 UInt32 dataSize = sizeof( AudioDeviceID );
599 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
600 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
601 if ( result != noErr ) {
602 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
603 error( RtAudioError::WARNING );
607 dataSize *= nDevices;
608 AudioDeviceID deviceList[ nDevices ];
609 property.mSelector = kAudioHardwarePropertyDevices;
610 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
611 if ( result != noErr ) {
612 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
613 error( RtAudioError::WARNING );
617 for ( unsigned int i=0; i<nDevices; i++ )
618 if ( id == deviceList[i] ) return i;
620 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
621 error( RtAudioError::WARNING );
625 unsigned int RtApiCore :: getDefaultOutputDevice( void )
627 unsigned int nDevices = getDeviceCount();
628 if ( nDevices <= 1 ) return 0;
631 UInt32 dataSize = sizeof( AudioDeviceID );
632 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
633 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
634 if ( result != noErr ) {
635 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
636 error( RtAudioError::WARNING );
640 dataSize = sizeof( AudioDeviceID ) * nDevices;
641 AudioDeviceID deviceList[ nDevices ];
642 property.mSelector = kAudioHardwarePropertyDevices;
643 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
644 if ( result != noErr ) {
645 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
646 error( RtAudioError::WARNING );
650 for ( unsigned int i=0; i<nDevices; i++ )
651 if ( id == deviceList[i] ) return i;
653 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
654 error( RtAudioError::WARNING );
// Build a DeviceInfo record for the CoreAudio device at index 'device':
// manufacturer + device name, input/output/duplex channel counts,
// supported sample rates and native format.
// NOTE(review): this view of the file is missing interior lines (early
// returns after warnings, free()/CFRelease of temporaries, closing
// braces); the code below is kept exactly as found.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
RtAudio::DeviceInfo info;
// Validate the device index against the current device count.
unsigned int nDevices = getDeviceCount();
if ( nDevices == 0 ) {
errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
error( RtAudioError::INVALID_USE );
if ( device >= nDevices ) {
errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
error( RtAudioError::INVALID_USE );
// Fetch the full system device list and pick the requested entry.
AudioDeviceID deviceList[ nDevices ];
UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
0, NULL, &dataSize, (void *) &deviceList );
if ( result != noErr ) {
errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
error( RtAudioError::WARNING );
AudioDeviceID id = deviceList[ device ];
// Get the device name.
// The name is assembled as "<manufacturer>: <device name>" from two
// CFString properties, converted through a temporary malloc'd buffer
// (3 bytes per UTF-16 unit + NUL is enough for the UTF-8 conversion).
dataSize = sizeof( CFStringRef );
property.mSelector = kAudioObjectPropertyManufacturer;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
if ( result != noErr ) {
errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
//const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
int length = CFStringGetLength(cfname);
char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append( (const char *)mname, strlen(mname) );
info.name.append( ": " );
property.mSelector = kAudioObjectPropertyName;
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
if ( result != noErr ) {
errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
//const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
length = CFStringGetLength(cfname);
char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
info.name.append( (const char *)name, strlen(name) );
// Get the output stream "configuration".
// Channel counts are summed over every stream's buffers in each scope.
AudioBufferList *bufferList = nil;
property.mSelector = kAudioDevicePropertyStreamConfiguration;
property.mScope = kAudioDevicePropertyScopeOutput;
//  property.mElement = kAudioObjectPropertyElementWildcard;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc( dataSize );
if ( bufferList == NULL ) {
errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
error( RtAudioError::WARNING );
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// Get output channel information.
unsigned int i, nStreams = bufferList->mNumberBuffers;
for ( i=0; i<nStreams; i++ )
info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
// Get the input stream "configuration".
property.mScope = kAudioDevicePropertyScopeInput;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != noErr || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// Allocate the AudioBufferList.
bufferList = (AudioBufferList *) malloc( dataSize );
if ( bufferList == NULL ) {
errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
error( RtAudioError::WARNING );
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
if (result != noErr || dataSize == 0) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// Get input channel information.
nStreams = bufferList->mNumberBuffers;
for ( i=0; i<nStreams; i++ )
info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
// If device opens for both playback and capture, we determine the channels.
if ( info.outputChannels > 0 && info.inputChannels > 0 )
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Probe the device sample rates.
bool isInput = false;
if ( info.outputChannels == 0 ) isInput = true;
// Determine the supported sample rates.
property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
if ( result != kAudioHardwareNoError || dataSize == 0 ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
UInt32 nRanges = dataSize / sizeof( AudioValueRange );
AudioValueRange rangeList[ nRanges ];
result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
if ( result != kAudioHardwareNoError ) {
errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// The sample rate reporting mechanism is a bit of a mystery.  It
// seems that it can either return individual rates or a range of
// rates.  I assume that if the min / max range values are the same,
// then that represents a single supported rate and if the min / max
// range values are different, the device supports an arbitrary
// range of values (though there might be multiple ranges, so we'll
// use the most conservative range).
Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
bool haveValueRange = false;
info.sampleRates.clear();
for ( UInt32 i=0; i<nRanges; i++ ) {
if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
info.sampleRates.push_back( tmpSr );
// Preferred rate: the highest discrete rate that does not exceed 48 kHz.
if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
info.preferredSampleRate = tmpSr;
haveValueRange = true;
if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
// For a continuous range, enumerate the standard SAMPLE_RATES table
// against the narrowest [min, max] interval found above.
if ( haveValueRange ) {
for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
info.sampleRates.push_back( SAMPLE_RATES[k] );
if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
info.preferredSampleRate = SAMPLE_RATES[k];
// Sort and remove any redundant values
std::sort( info.sampleRates.begin(), info.sampleRates.end() );
info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
if ( info.sampleRates.size() == 0 ) {
errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
errorText_ = errorStream_.str();
error( RtAudioError::WARNING );
// CoreAudio always uses 32-bit floating point data for PCM streams.
// Thus, any other "physical" formats supported by the device are of
// no interest to the client.
info.nativeFormats = RTAUDIO_FLOAT32;
// Flag whether this device is the system default for either direction.
if ( info.outputChannels > 0 )
if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
if ( info.inputChannels > 0 )
if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
899 static OSStatus callbackHandler( AudioDeviceID inDevice,
900 const AudioTimeStamp* /*inNow*/,
901 const AudioBufferList* inInputData,
902 const AudioTimeStamp* /*inInputTime*/,
903 AudioBufferList* outOutputData,
904 const AudioTimeStamp* /*inOutputTime*/,
907 CallbackInfo *info = (CallbackInfo *) infoPointer;
909 RtApiCore *object = (RtApiCore *) info->object;
910 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
911 return kAudioHardwareUnspecifiedError;
913 return kAudioHardwareNoError;
916 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
918 const AudioObjectPropertyAddress properties[],
919 void* handlePointer )
921 CoreHandle *handle = (CoreHandle *) handlePointer;
922 for ( UInt32 i=0; i<nAddresses; i++ ) {
923 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
924 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
925 handle->xrun[1] = true;
927 handle->xrun[0] = true;
931 return kAudioHardwareNoError;
934 static OSStatus rateListener( AudioObjectID inDevice,
935 UInt32 /*nAddresses*/,
936 const AudioObjectPropertyAddress /*properties*/[],
939 Float64 *rate = (Float64 *) ratePointer;
940 UInt32 dataSize = sizeof( Float64 );
941 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
942 kAudioObjectPropertyScopeGlobal,
943 kAudioObjectPropertyElementMaster };
944 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
945 return kAudioHardwareNoError;
// Open and configure a CoreAudio device for one direction of a stream.
// Steps (as visible here): validate the device index, fetch the system
// device list, select the CoreAudio stream(s) that cover `channels`
// starting at `firstChannel`, negotiate the buffer size, nominal sample
// rate and virtual/physical data formats, allocate the CoreHandle and
// internal user/device buffers, register the IOProc callback and the
// processor-overload (xrun) listener.  On any failure, errorText_ is
// set and the function fails (bool return, RtAudio SUCCESS/FAILURE
// convention — confirm against the full source).
948 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
949 unsigned int firstChannel, unsigned int sampleRate,
950 RtAudioFormat format, unsigned int *bufferSize,
951 RtAudio::StreamOptions *options )
954 unsigned int nDevices = getDeviceCount();
955 if ( nDevices == 0 ) {
956 // This should not happen because a check is made before this function is called.
957 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
961 if ( device >= nDevices ) {
962 // This should not happen because a check is made before this function is called.
963 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
// Map the RtAudio device index onto a CoreAudio AudioDeviceID via the
// system-wide device list.
// NOTE(review): a variable-length array is a compiler extension in C++
// (not standard) — a std::vector<AudioDeviceID> would be portable.
967 AudioDeviceID deviceList[ nDevices ];
968 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
969 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
970 kAudioObjectPropertyScopeGlobal,
971 kAudioObjectPropertyElementMaster };
972 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
973 0, NULL, &dataSize, (void *) &deviceList );
974 if ( result != noErr ) {
975 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
979 AudioDeviceID id = deviceList[ device ];
981 // Setup for stream mode.
982 bool isInput = false;
983 if ( mode == INPUT ) {
985 property.mScope = kAudioDevicePropertyScopeInput;
988 property.mScope = kAudioDevicePropertyScopeOutput;
990 // Get the stream "configuration".
991 AudioBufferList *bufferList = nil;
993 property.mSelector = kAudioDevicePropertyStreamConfiguration;
994 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
995 if ( result != noErr || dataSize == 0 ) {
996 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
997 errorText_ = errorStream_.str();
1001 // Allocate the AudioBufferList.
1002 bufferList = (AudioBufferList *) malloc( dataSize );
1003 if ( bufferList == NULL ) {
1004 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
1008 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
1009 if (result != noErr || dataSize == 0) {
1011 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
1012 errorText_ = errorStream_.str();
1016 // Search for one or more streams that contain the desired number of
1017 // channels. CoreAudio devices can have an arbitrary number of
1018 // streams and each stream can have an arbitrary number of channels.
1019 // For each stream, a single buffer of interleaved samples is
1020 // provided. RtAudio prefers the use of one stream of interleaved
1021 // data or multiple consecutive single-channel streams. However, we
1022 // now support multiple consecutive multi-channel streams of
1023 // interleaved data as well.
1024 UInt32 iStream, offsetCounter = firstChannel;
1025 UInt32 nStreams = bufferList->mNumberBuffers;
1026 bool monoMode = false;
1027 bool foundStream = false;
1029 // First check that the device supports the requested number of
// (channels, counting across all of its streams)
1031 UInt32 deviceChannels = 0;
1032 for ( iStream=0; iStream<nStreams; iStream++ )
1033 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1035 if ( deviceChannels < ( channels + firstChannel ) ) {
1037 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1038 errorText_ = errorStream_.str();
1042 // Look for a single stream meeting our needs.
1043 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1044 for ( iStream=0; iStream<nStreams; iStream++ ) {
1045 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1046 if ( streamChannels >= channels + offsetCounter ) {
1047 firstStream = iStream;
1048 channelOffset = offsetCounter;
// offsetCounter tracks how many requested-offset channels remain to be
// skipped while walking the stream list.
1052 if ( streamChannels > offsetCounter ) break;
1053 offsetCounter -= streamChannels;
1056 // If we didn't find a single stream above, then we should be able
1057 // to meet the channel specification with multiple streams.
1058 if ( foundStream == false ) {
1060 offsetCounter = firstChannel;
1061 for ( iStream=0; iStream<nStreams; iStream++ ) {
1062 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1063 if ( streamChannels > offsetCounter ) break;
1064 offsetCounter -= streamChannels;
1067 firstStream = iStream;
1068 channelOffset = offsetCounter;
1069 Int32 channelCounter = channels + offsetCounter - streamChannels;
// monoMode stays true only if every stream touched carries one channel;
// it later selects the non-interleaved (one-buffer-per-channel) path.
1071 if ( streamChannels > 1 ) monoMode = false;
1072 while ( channelCounter > 0 ) {
1073 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1074 if ( streamChannels > 1 ) monoMode = false;
1075 channelCounter -= streamChannels;
1082 // Determine the buffer size.
1083 AudioValueRange bufferRange;
1084 dataSize = sizeof( AudioValueRange );
1085 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1088 if ( result != noErr ) {
1089 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1090 errorText_ = errorStream_.str();
// Clamp the caller's requested size into the device's supported range;
// RTAUDIO_MINIMIZE_LATENCY overrides with the device minimum.
1094 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1095 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1096 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1098 // Set the buffer size. For multiple streams, I'm assuming we only
1099 // need to make this setting for the master channel.
1100 UInt32 theSize = (UInt32) *bufferSize;
1101 dataSize = sizeof( UInt32 );
1102 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1105 if ( result != noErr ) {
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1107 errorText_ = errorStream_.str();
1111 // If attempting to setup a duplex stream, the bufferSize parameter
1112 // MUST be the same in both directions!
1113 *bufferSize = theSize;
1114 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1115 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1116 errorText_ = errorStream_.str();
1120 stream_.bufferSize = *bufferSize;
1121 stream_.nBuffers = 1;
1123 // Try to set "hog" mode ... it's not clear to me this is working.
// Hog mode gives this process exclusive use of the device; we only
// attempt to take it if another pid does not already own it.
1124 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1126 dataSize = sizeof( hog_pid );
1127 property.mSelector = kAudioDevicePropertyHogMode;
1128 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1129 if ( result != noErr ) {
1130 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1131 errorText_ = errorStream_.str();
1135 if ( hog_pid != getpid() ) {
1137 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1138 if ( result != noErr ) {
1139 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1140 errorText_ = errorStream_.str();
1146 // Check and if necessary, change the sample rate for the device.
1147 Float64 nominalRate;
1148 dataSize = sizeof( Float64 );
1149 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1150 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1151 if ( result != noErr ) {
1152 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1153 errorText_ = errorStream_.str();
1157 // Only change the sample rate if off by more than 1 Hz.
// A temporary listener (rateListener) is installed on the nominal
// sample-rate property so we can observe when the asynchronous rate
// change actually takes effect, then we poll for up to ~5 seconds.
1158 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1160 // Set a property listener for the sample rate change
1161 Float64 reportedRate = 0.0;
1162 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1163 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1164 if ( result != noErr ) {
1165 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1166 errorText_ = errorStream_.str();
1170 nominalRate = (Float64) sampleRate;
1171 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1172 if ( result != noErr ) {
1173 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1174 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1175 errorText_ = errorStream_.str();
1179 // Now wait until the reported nominal rate is what we just set.
// Poll loop: 5000-microsecond steps up to a 5-second timeout.
// (A usleep(5000) between iterations is presumably on an elided line —
// confirm against the full source.)
1180 UInt32 microCounter = 0;
1181 while ( reportedRate != nominalRate ) {
1182 microCounter += 5000;
1183 if ( microCounter > 5000000 ) break;
1187 // Remove the property listener.
1188 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1190 if ( microCounter > 5000000 ) {
1191 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1192 errorText_ = errorStream_.str();
1197 // Now set the stream format for all streams. Also, check the
1198 // physical format of the device and change that if necessary.
1199 AudioStreamBasicDescription description;
1200 dataSize = sizeof( AudioStreamBasicDescription );
1201 property.mSelector = kAudioStreamPropertyVirtualFormat;
1202 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1203 if ( result != noErr ) {
1204 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1205 errorText_ = errorStream_.str();
1209 // Set the sample rate and data format id. However, only make the
1210 // change if the sample rate is not within 1.0 of the desired
1211 // rate and the format is not linear pcm.
1212 bool updateFormat = false;
1213 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1214 description.mSampleRate = (Float64) sampleRate;
1215 updateFormat = true;
1218 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1219 description.mFormatID = kAudioFormatLinearPCM;
1220 updateFormat = true;
1223 if ( updateFormat ) {
1224 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1225 if ( result != noErr ) {
1226 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1227 errorText_ = errorStream_.str();
1232 // Now check the physical format.
1233 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1235 if ( result != noErr ) {
1236 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1237 errorText_ = errorStream_.str();
1241 //std::cout << "Current physical stream format:" << std::endl;
1242 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1243 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1244 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1245 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
// If the physical format is not linear PCM at >= 16 bits, try a ranked
// list of candidate formats, best quality first.
1247 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1248 description.mFormatID = kAudioFormatLinearPCM;
1249 //description.mSampleRate = (Float64) sampleRate;
1250 AudioStreamBasicDescription testDescription = description;
1253 // We'll try higher bit rates first and then work our way down.
// Each pair is (bit depth, format flags).  The fractional first values
// 24.2 / 24.4 below are markers distinguishing the two 24-bit-in-4-byte
// variants; both cast back to 24 when applied.
// NOTE(review): the vector is declared with pair<UInt32, UInt32> but
// pair<Float32, UInt32> elements are pushed below — the fractional
// markers would truncate to 24 if the element type really is UInt32.
// The element type should presumably be pair<Float32, UInt32>; verify
// against the canonical RtAudio source.
1254 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1255 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1257 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1258 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1259 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1260 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1261 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1262 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1263 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1264 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1265 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1266 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1268 bool setPhysicalFormat = false;
1269 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1270 testDescription = description;
1271 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1272 testDescription.mFormatFlags = physicalFormats[i].second;
// NOTE(review): '~' here is a bitwise NOT, so this condition is true for
// every flag value (both packed and unpacked masks yield non-zero) —
// logical '!' ("24-bit and NOT packed") looks intended.  As written,
// every 24-bit candidate is given 4-byte frames.  Verify intent.
1273 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1274 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1276 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1277 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1278 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1279 if ( result == noErr ) {
1280 setPhysicalFormat = true;
1281 //std::cout << "Updated physical stream format:" << std::endl;
1282 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1283 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1284 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1285 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1290 if ( !setPhysicalFormat ) {
1291 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1292 errorText_ = errorStream_.str();
1295 } // done setting virtual/physical formats.
1297 // Get the stream / device latency.
// Optional property: only a warning (not a failure) if unavailable.
1299 dataSize = sizeof( UInt32 );
1300 property.mSelector = kAudioDevicePropertyLatency;
1301 if ( AudioObjectHasProperty( id, &property ) == true ) {
1302 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1303 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1305 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1306 errorText_ = errorStream_.str();
1307 error( RtAudioError::WARNING );
1311 // Byte-swapping: According to AudioHardware.h, the stream data will
1312 // always be presented in native-endian format, so we should never
1313 // need to byte swap.
1314 stream_.doByteSwap[mode] = false;
1316 // From the CoreAudio documentation, PCM data must be supplied as
// 32-bit floats at the virtual (IOProc) level.
1318 stream_.userFormat = format;
1319 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1321 if ( streamCount == 1 )
1322 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1323 else // multiple streams
1324 stream_.nDeviceChannels[mode] = channels;
1325 stream_.nUserChannels[mode] = channels;
1326 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1327 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1328 else stream_.userInterleaved = true;
1329 stream_.deviceInterleaved[mode] = true;
1330 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1332 // Set flags for buffer conversion.
1333 stream_.doConvertBuffer[mode] = false;
1334 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1335 stream_.doConvertBuffer[mode] = true;
1336 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1337 stream_.doConvertBuffer[mode] = true;
1338 if ( streamCount == 1 ) {
1339 if ( stream_.nUserChannels[mode] > 1 &&
1340 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1341 stream_.doConvertBuffer[mode] = true;
1343 else if ( monoMode && stream_.userInterleaved )
1344 stream_.doConvertBuffer[mode] = true;
1346 // Allocate our CoreHandle structure for the stream.
// A single CoreHandle is shared by both directions of a duplex stream;
// it is created on the first probeDeviceOpen() call and reused after.
1347 CoreHandle *handle = 0;
1348 if ( stream_.apiHandle == 0 ) {
1350 handle = new CoreHandle;
1352 catch ( std::bad_alloc& ) {
1353 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1357 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1358 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1361 stream_.apiHandle = (void *) handle;
1364 handle = (CoreHandle *) stream_.apiHandle;
1365 handle->iStream[mode] = firstStream;
1366 handle->nStreams[mode] = streamCount;
1367 handle->id[mode] = id;
1369 // Allocate necessary internal buffers.
1370 unsigned long bufferBytes;
1371 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1372 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
// NOTE(review): the memset on the next line dereferences the freshly
// malloc'd pointer BEFORE the NULL check below — on allocation failure
// this crashes before the error path can run.  The check should precede
// the memset (or calloc should be used, as the commented line suggests).
1373 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1374 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1375 if ( stream_.userBuffer[mode] == NULL ) {
1376 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1380 // If possible, we will make use of the CoreAudio stream buffers as
1381 // "device buffers". However, we can't do this if using multiple
// streams — then a separate interleaved device buffer is needed.
1383 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
// For duplex streams, reuse an existing (output) device buffer when it
// is already large enough for the input side.
1385 bool makeBuffer = true;
1386 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1387 if ( mode == INPUT ) {
1388 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1389 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1390 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1395 bufferBytes *= *bufferSize;
1396 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1397 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1398 if ( stream_.deviceBuffer == NULL ) {
1399 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1405 stream_.sampleRate = sampleRate;
1406 stream_.device[mode] = device;
1407 stream_.state = STREAM_STOPPED;
1408 stream_.callbackInfo.object = (void *) this;
1410 // Setup the buffer conversion information structure.
1411 if ( stream_.doConvertBuffer[mode] ) {
1412 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1413 else setConvertInfo( mode, channelOffset );
// When both directions use the same physical device, a single IOProc
// services the duplex stream — do not register a second callback.
1416 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1417 // Only one callback procedure per device.
1418 stream_.mode = DUPLEX;
1420 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1421 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1423 // deprecated in favor of AudioDeviceCreateIOProcID()
1424 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1426 if ( result != noErr ) {
1427 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1428 errorText_ = errorStream_.str();
1431 if ( stream_.mode == OUTPUT && mode == INPUT )
1432 stream_.mode = DUPLEX;
1434 stream_.mode = mode;
1437 // Setup the device property listener for over/underload.
1438 property.mSelector = kAudioDeviceProcessorOverload;
1439 property.mScope = kAudioObjectPropertyScopeGlobal;
1440 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
// Shared error-cleanup path: release everything allocated above so a
// failed open leaves the stream in a clean CLOSED state.
1446 pthread_cond_destroy( &handle->condition );
1448 stream_.apiHandle = 0;
1451 for ( int i=0; i<2; i++ ) {
1452 if ( stream_.userBuffer[i] ) {
1453 free( stream_.userBuffer[i] );
1454 stream_.userBuffer[i] = 0;
1458 if ( stream_.deviceBuffer ) {
1459 free( stream_.deviceBuffer );
1460 stream_.deviceBuffer = 0;
1463 stream_.state = STREAM_CLOSED;
// Close the open stream: remove the overload (xrun) listeners, stop the
// device(s) if still running, destroy/remove the IOProc callback(s),
// free the user and device buffers, tear down the pthread condition
// variable, and reset the stream bookkeeping to the CLOSED state.
// Issues only a WARNING (does not throw) when there is nothing to close.
1467 void RtApiCore :: closeStream( void )
1469 if ( stream_.state == STREAM_CLOSED ) {
1470 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1471 error( RtAudioError::WARNING );
1475 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Output-side teardown (handle->id[0] is the output device).
1476 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1478 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1479 kAudioObjectPropertyScopeGlobal,
1480 kAudioObjectPropertyElementMaster };
// Re-target the address at the processor-overload property registered
// in probeDeviceOpen() before removing the listener.
1482 property.mSelector = kAudioDeviceProcessorOverload;
1483 property.mScope = kAudioObjectPropertyScopeGlobal;
1484 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1485 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1486 error( RtAudioError::WARNING );
1489 if ( stream_.state == STREAM_RUNNING )
1490 AudioDeviceStop( handle->id[0], callbackHandler );
1491 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1492 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1494 // deprecated in favor of AudioDeviceDestroyIOProcID()
1495 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// Input-side teardown — skipped for a duplex stream on a single device,
// since that case shares one IOProc with the output side.
1499 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1501 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1502 kAudioObjectPropertyScopeGlobal,
1503 kAudioObjectPropertyElementMaster };
1505 property.mSelector = kAudioDeviceProcessorOverload;
1506 property.mScope = kAudioObjectPropertyScopeGlobal;
1507 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1508 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1509 error( RtAudioError::WARNING );
1512 if ( stream_.state == STREAM_RUNNING )
1513 AudioDeviceStop( handle->id[1], callbackHandler );
1514 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1515 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1517 // deprecated in favor of AudioDeviceDestroyIOProcID()
1518 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Release buffers for both directions (index 0 = output, 1 = input).
1522 for ( int i=0; i<2; i++ ) {
1523 if ( stream_.userBuffer[i] ) {
1524 free( stream_.userBuffer[i] );
1525 stream_.userBuffer[i] = 0;
1529 if ( stream_.deviceBuffer ) {
1530 free( stream_.deviceBuffer );
1531 stream_.deviceBuffer = 0;
1534 // Destroy pthread condition variable.
1535 pthread_cond_destroy( &handle->condition );
// (The CoreHandle itself is presumably deleted on an elided line before
// the pointer is cleared — confirm against the full source.)
1537 stream_.apiHandle = 0;
1539 stream_.mode = UNINITIALIZED;
1540 stream_.state = STREAM_CLOSED;
// Start the opened stream: calls AudioDeviceStart() for the output
// device and/or the input device (the input side is started separately
// only when it is a different physical device), resets the drain state,
// and marks the stream RUNNING.  Any CoreAudio error falls through to
// error( SYSTEM_ERROR ) at the bottom; already-running is a WARNING.
1543 void RtApiCore :: startStream( void )
1546 RtApi::startStream();
1547 if ( stream_.state == STREAM_RUNNING ) {
1548 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1549 error( RtAudioError::WARNING );
// Record the wall-clock start time used as the stream-time baseline.
1553 #if defined( HAVE_GETTIMEOFDAY )
1554 gettimeofday( &stream_.lastTickTimestamp, NULL );
1557 OSStatus result = noErr;
1558 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1559 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1561 result = AudioDeviceStart( handle->id[0], callbackHandler );
1562 if ( result != noErr ) {
1563 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1564 errorText_ = errorStream_.str();
// Start the input device only when it is distinct from the output one;
// a same-device duplex stream shares the single IOProc started above.
1569 if ( stream_.mode == INPUT ||
1570 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1572 result = AudioDeviceStart( handle->id[1], callbackHandler );
1573 if ( result != noErr ) {
1574 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1575 errorText_ = errorStream_.str();
// Reset the drain handshake used by stopStream()/abortStream().
1580 handle->drainCounter = 0;
1581 handle->internalDrain = false;
1582 stream_.state = STREAM_RUNNING;
1585 if ( result == noErr ) return;
1586 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream, first letting any pending output drain.  For output
// streams, setting drainCounter = 2 tells callbackEvent() to write
// silence and then signal the condition variable; this thread blocks on
// that condition before calling AudioDeviceStop().  Input-only devices
// are stopped directly.  Already-stopped is a WARNING; a CoreAudio
// failure falls through to error( SYSTEM_ERROR ).
1589 void RtApiCore :: stopStream( void )
1592 if ( stream_.state == STREAM_STOPPED ) {
1593 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1594 error( RtAudioError::WARNING );
1598 OSStatus result = noErr;
1599 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1600 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is in progress yet (i.e. this is an
// external stop rather than a callback-initiated one), so request one
// and wait for the callback to signal completion.
// NOTE(review): pthread_cond_wait() requires stream_.mutex to be held
// by the caller — the lock is presumably taken on an elided line;
// confirm against the full source.
1602 if ( handle->drainCounter == 0 ) {
1603 handle->drainCounter = 2;
1604 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1607 result = AudioDeviceStop( handle->id[0], callbackHandler );
1608 if ( result != noErr ) {
1609 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1610 errorText_ = errorStream_.str();
// Stop the input device only when it is distinct from the output one.
1615 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1617 result = AudioDeviceStop( handle->id[1], callbackHandler );
1618 if ( result != noErr ) {
1619 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1620 errorText_ = errorStream_.str();
1625 stream_.state = STREAM_STOPPED;
1628 if ( result == noErr ) return;
1629 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately, without draining pending output: setting
// drainCounter = 2 up front makes callbackEvent() emit silence and
// begin the stop sequence right away.  Already-stopped is a WARNING.
// (The tail of this function is elided from this view — it presumably
// completes the stop, e.g. via stopStream(); confirm against the full
// source.)
1632 void RtApiCore :: abortStream( void )
1635 if ( stream_.state == STREAM_STOPPED ) {
1636 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1637 error( RtAudioError::WARNING );
1641 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1642 handle->drainCounter = 2;
1647 // This function will be called by a spawned thread when the user
1648 // callback function signals that the stream should be stopped or
1649 // aborted. It is better to handle it this way because the
1650 // callbackEvent() function probably should return before the AudioDeviceStop()
1651 // function is called.
// Thread entry point: `ptr` is the stream's CallbackInfo.  Recovers the
// RtApiCore instance, stops the stream, then terminates this helper
// thread via pthread_exit().
1652 static void *coreStopStream( void *ptr )
1654 CallbackInfo *info = (CallbackInfo *) ptr;
1655 RtApiCore *object = (RtApiCore *) info->object;
1657 object->stopStream();
1658 pthread_exit( NULL );
1661 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1662 const AudioBufferList *inBufferList,
1663 const AudioBufferList *outBufferList )
1665 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1666 if ( stream_.state == STREAM_CLOSED ) {
1667 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1668 error( RtAudioError::WARNING );
1672 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1673 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1675 // Check if we were draining the stream and signal is finished.
1676 if ( handle->drainCounter > 3 ) {
1677 ThreadHandle threadId;
1679 stream_.state = STREAM_STOPPING;
1680 if ( handle->internalDrain == true )
1681 pthread_create( &threadId, NULL, coreStopStream, info );
1682 else // external call to stopStream()
1683 pthread_cond_signal( &handle->condition );
1687 AudioDeviceID outputDevice = handle->id[0];
1689 // Invoke user callback to get fresh output data UNLESS we are
1690 // draining stream or duplex mode AND the input/output devices are
1691 // different AND this function is called for the input device.
1692 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1693 RtAudioCallback callback = (RtAudioCallback) info->callback;
1694 double streamTime = getStreamTime();
1695 RtAudioStreamStatus status = 0;
1696 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1697 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1698 handle->xrun[0] = false;
1700 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1701 status |= RTAUDIO_INPUT_OVERFLOW;
1702 handle->xrun[1] = false;
1705 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1706 stream_.bufferSize, streamTime, status, info->userData );
1707 if ( cbReturnValue == 2 ) {
1708 stream_.state = STREAM_STOPPING;
1709 handle->drainCounter = 2;
1713 else if ( cbReturnValue == 1 ) {
1714 handle->drainCounter = 1;
1715 handle->internalDrain = true;
1719 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1721 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1723 if ( handle->nStreams[0] == 1 ) {
1724 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1726 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1728 else { // fill multiple streams with zeros
1729 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1730 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1732 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1736 else if ( handle->nStreams[0] == 1 ) {
1737 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1738 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1739 stream_.userBuffer[0], stream_.convertInfo[0] );
1741 else { // copy from user buffer
1742 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1743 stream_.userBuffer[0],
1744 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1747 else { // fill multiple streams
1748 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1749 if ( stream_.doConvertBuffer[0] ) {
1750 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1751 inBuffer = (Float32 *) stream_.deviceBuffer;
1754 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1755 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1756 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1757 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1758 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1761 else { // fill multiple multi-channel streams with interleaved data
1762 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1765 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1766 UInt32 inChannels = stream_.nUserChannels[0];
1767 if ( stream_.doConvertBuffer[0] ) {
1768 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1769 inChannels = stream_.nDeviceChannels[0];
1772 if ( inInterleaved ) inOffset = 1;
1773 else inOffset = stream_.bufferSize;
1775 channelsLeft = inChannels;
1776 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1778 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1779 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1782 // Account for possible channel offset in first stream
1783 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1784 streamChannels -= stream_.channelOffset[0];
1785 outJump = stream_.channelOffset[0];
1789 // Account for possible unfilled channels at end of the last stream
1790 if ( streamChannels > channelsLeft ) {
1791 outJump = streamChannels - channelsLeft;
1792 streamChannels = channelsLeft;
1795 // Determine input buffer offsets and skips
1796 if ( inInterleaved ) {
1797 inJump = inChannels;
1798 in += inChannels - channelsLeft;
1802 in += (inChannels - channelsLeft) * inOffset;
1805 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1806 for ( unsigned int j=0; j<streamChannels; j++ ) {
1807 *out++ = in[j*inOffset];
1812 channelsLeft -= streamChannels;
1818 // Don't bother draining input
1819 if ( handle->drainCounter ) {
1820 handle->drainCounter++;
1824 AudioDeviceID inputDevice;
1825 inputDevice = handle->id[1];
1826 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1828 if ( handle->nStreams[1] == 1 ) {
1829 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1830 convertBuffer( stream_.userBuffer[1],
1831 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1832 stream_.convertInfo[1] );
1834 else { // copy to user buffer
1835 memcpy( stream_.userBuffer[1],
1836 inBufferList->mBuffers[handle->iStream[1]].mData,
1837 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1840 else { // read from multiple streams
1841 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1842 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1844 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1845 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1846 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1847 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1848 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1851 else { // read from multiple multi-channel streams
1852 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1855 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1856 UInt32 outChannels = stream_.nUserChannels[1];
1857 if ( stream_.doConvertBuffer[1] ) {
1858 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1859 outChannels = stream_.nDeviceChannels[1];
1862 if ( outInterleaved ) outOffset = 1;
1863 else outOffset = stream_.bufferSize;
1865 channelsLeft = outChannels;
1866 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1868 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1869 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1872 // Account for possible channel offset in first stream
1873 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1874 streamChannels -= stream_.channelOffset[1];
1875 inJump = stream_.channelOffset[1];
1879 // Account for possible unread channels at end of the last stream
1880 if ( streamChannels > channelsLeft ) {
1881 inJump = streamChannels - channelsLeft;
1882 streamChannels = channelsLeft;
1885 // Determine output buffer offsets and skips
1886 if ( outInterleaved ) {
1887 outJump = outChannels;
1888 out += outChannels - channelsLeft;
1892 out += (outChannels - channelsLeft) * outOffset;
1895 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1896 for ( unsigned int j=0; j<streamChannels; j++ ) {
1897 out[j*outOffset] = *in++;
1902 channelsLeft -= streamChannels;
1906 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1907 convertBuffer( stream_.userBuffer[1],
1908 stream_.deviceBuffer,
1909 stream_.convertInfo[1] );
1915 //MUTEX_UNLOCK( &stream_.mutex );
1917 RtApi::tickStreamTime();
1921 const char* RtApiCore :: getErrorCode( OSStatus code )
1925 case kAudioHardwareNotRunningError:
1926 return "kAudioHardwareNotRunningError";
1928 case kAudioHardwareUnspecifiedError:
1929 return "kAudioHardwareUnspecifiedError";
1931 case kAudioHardwareUnknownPropertyError:
1932 return "kAudioHardwareUnknownPropertyError";
1934 case kAudioHardwareBadPropertySizeError:
1935 return "kAudioHardwareBadPropertySizeError";
1937 case kAudioHardwareIllegalOperationError:
1938 return "kAudioHardwareIllegalOperationError";
1940 case kAudioHardwareBadObjectError:
1941 return "kAudioHardwareBadObjectError";
1943 case kAudioHardwareBadDeviceError:
1944 return "kAudioHardwareBadDeviceError";
1946 case kAudioHardwareBadStreamError:
1947 return "kAudioHardwareBadStreamError";
1949 case kAudioHardwareUnsupportedOperationError:
1950 return "kAudioHardwareUnsupportedOperationError";
1952 case kAudioDeviceUnsupportedFormatError:
1953 return "kAudioDeviceUnsupportedFormatError";
1955 case kAudioDevicePermissionsError:
1956 return "kAudioDevicePermissionsError";
1959 return "CoreAudio unknown error";
1963 //******************** End of __MACOSX_CORE__ *********************//
1966 #if defined(__UNIX_JACK__)
1968 // JACK is a low-latency audio server, originally written for the
1969 // GNU/Linux operating system and now also ported to OS-X. It can
1970 // connect a number of different applications to an audio device, as
1971 // well as allowing them to share audio between themselves.
1973 // When using JACK with RtAudio, "devices" refer to JACK clients that
1974 // have ports connected to the server. The JACK server is typically
1975 // started in a terminal as follows:
1977 // .jackd -d alsa -d hw:0
1979 // or through an interface program such as qjackctl. Many of the
1980 // parameters normally set for a stream are fixed by the JACK server
1981 // and can be specified when the JACK server is started. In
1984 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1986 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1987 // frames, and number of buffers = 4. Once the server is running, it
1988 // is not possible to override these values. If the values are not
1989 // specified in the command-line, the JACK server uses default values.
1991 // The JACK server does not have to be running when an instance of
1992 // RtApiJack is created, though the function getDeviceCount() will
1993 // report 0 devices found until JACK has been started. When no
1994 // devices are available (i.e., the JACK server is not running), a
1995 // stream cannot be opened.
1997 #include <jack/jack.h>
2001 // A structure to hold various information related to the Jack API
2004 jack_client_t *client;
2005 jack_port_t **ports[2];
2006 std::string deviceName[2];
2008 pthread_cond_t condition;
2009 int drainCounter; // Tracks callback counts when draining
2010 bool internalDrain; // Indicates if stop is initiated from callback or not.
2013 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op Jack error handler, installed via jack_set_error_function() in
// the RtApiJack constructor to silence Jack's internal error reporting
// in non-debug builds.
// (Fixed: removed the stray ';' after the empty body and restored the
// #endif that was dropped from this listing.)
static void jackSilentError( const char * ) {}
#endif
// Constructor: port auto-connection defaults to on; in non-debug builds
// Jack's internal error reporting is silenced with jackSilentError.
// NOTE(review): original numbering is non-contiguous here — the closing
// #endif and brace are not visible in this listing.
2020 RtApiJack :: RtApiJack()
2021 :shouldAutoconnect_(true) {
2022 // Nothing to do here.
2023 #if !defined(__RTAUDIO_DEBUG__)
2024 // Turn off Jack's internal error reporting.
2025 jack_set_error_function( &jackSilentError );
// Destructor: make sure any open stream is torn down before the API
// object goes away (closeStream() deactivates and closes the client).
2029 RtApiJack :: ~RtApiJack()
2031 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count Jack "devices" by opening a throwaway client and scanning all
// port names; each distinct prefix before the first ':' is one device.
// Returns 0 when no Jack server is running.
// NOTE(review): non-contiguous numbering — declarations of `ports` and
// `iColon`, the do-loop header, nDevices++ and the return are not
// visible in this listing; verify against upstream RtAudio.
2034 unsigned int RtApiJack :: getDeviceCount( void )
2036 // See if we can become a jack client.
2037 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2038 jack_status_t *status = NULL;
2039 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2040 if ( client == 0 ) return 0;
2043 std::string port, previousPort;
2044 unsigned int nChannels = 0, nDevices = 0;
2045 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2047 // Parse the port names up to the first colon (:).
2050 port = (char *) ports[ nChannels ];
2051 iColon = port.find(":");
2052 if ( iColon != std::string::npos ) {
// Note: here the colon is kept in the compared prefix (iColon + 1),
// unlike getDeviceInfo() below which strips it — both yield the same
// device grouping since the colon is included consistently within
// each function.
2053 port = port.substr( 0, iColon + 1 );
2054 if ( port != previousPort ) {
2056 previousPort = port;
2059 } while ( ports[++nChannels] );
2063 jack_client_close( client );
// Probe one Jack "device" (client-name prefix): fills in name, the
// single server sample rate, channel counts (Jack input ports = our
// output channels and vice versa), native format (float32 only) and
// default-device flags.  Emits a WARNING and returns early when the
// server is absent or the device has no ports; INVALID_USE for a bad id.
// NOTE(review): non-contiguous numbering — `ports`/`iColon` decls,
// do-loop header, nDevices++, free(ports) calls, `info.probed = true`
// and the return are not visible in this listing.
2067 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2069 RtAudio::DeviceInfo info;
2070 info.probed = false;
2072 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2073 jack_status_t *status = NULL;
2074 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2075 if ( client == 0 ) {
2076 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2077 error( RtAudioError::WARNING );
// Walk every port, grouping by the prefix before ':' to find the
// device-th distinct client name.
2082 std::string port, previousPort;
2083 unsigned int nPorts = 0, nDevices = 0;
2084 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2086 // Parse the port names up to the first colon (:).
2089 port = (char *) ports[ nPorts ];
2090 iColon = port.find(":");
2091 if ( iColon != std::string::npos ) {
2092 port = port.substr( 0, iColon );
2093 if ( port != previousPort ) {
2094 if ( nDevices == device ) info.name = port;
2096 previousPort = port;
2099 } while ( ports[++nPorts] );
2103 if ( device >= nDevices ) {
2104 jack_client_close( client );
2105 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2106 error( RtAudioError::INVALID_USE );
2110 // Get the current jack server sample rate.
2111 info.sampleRates.clear();
// Jack runs at exactly one rate, so that is the only supported rate.
2113 info.preferredSampleRate = jack_get_sample_rate( client );
2114 info.sampleRates.push_back( info.preferredSampleRate );
2116 // Count the available ports containing the client name as device
2117 // channels. Jack "input ports" equal RtAudio output channels.
2118 unsigned int nChannels = 0;
2119 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
2121 while ( ports[ nChannels ] ) nChannels++;
2123 info.outputChannels = nChannels;
2126 // Jack "output ports" equal RtAudio input channels.
2128 ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2130 while ( ports[ nChannels ] ) nChannels++;
2132 info.inputChannels = nChannels;
2135 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2136 jack_client_close(client);
2137 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2138 error( RtAudioError::WARNING );
2142 // If device opens for both playback and capture, we determine the channels.
2143 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2144 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2146 // Jack always uses 32-bit floats.
2147 info.nativeFormats = RTAUDIO_FLOAT32;
2149 // Jack doesn't provide default devices so we'll use the first available one.
2150 if ( device == 0 && info.outputChannels > 0 )
2151 info.isDefaultOutput = true;
2152 if ( device == 0 && info.inputChannels > 0 )
2153 info.isDefaultInput = true;
2155 jack_client_close(client);
2160 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2162 CallbackInfo *info = (CallbackInfo *) infoPointer;
2164 RtApiJack *object = (RtApiJack *) info->object;
2165 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2170 // This function will be called by a spawned thread when the Jack
2171 // server signals that it is shutting down. It is necessary to handle
2172 // it this way because the jackShutdown() function must return before
2173 // the jack_deactivate() function (in closeStream()) will return.
2174 static void *jackCloseStream( void *ptr )
2176 CallbackInfo *info = (CallbackInfo *) ptr;
2177 RtApiJack *object = (RtApiJack *) info->object;
2179 object->closeStream();
2181 pthread_exit( NULL );
2183 static void jackShutdown( void *infoPointer )
2185 CallbackInfo *info = (CallbackInfo *) infoPointer;
2186 RtApiJack *object = (RtApiJack *) info->object;
2188 // Check current stream state. If stopped, then we'll assume this
2189 // was called as a result of a call to RtApiJack::stopStream (the
2190 // deactivation of a client handle causes this function to be called).
2191 // If not, we'll assume the Jack server is shutting down or some
2192 // other problem occurred and we should close the stream.
2193 if ( object->isStreamRunning() == false ) return;
2195 ThreadHandle threadId;
2196 pthread_create( &threadId, NULL, jackCloseStream, info );
2197 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2200 static int jackXrun( void *infoPointer )
2202 JackHandle *handle = *((JackHandle **) infoPointer);
2204 if ( handle->ports[0] ) handle->xrun[0] = true;
2205 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a stream on a Jack "device": become a Jack
// client on the first pass, validate device id / channel counts /
// sample rate, fix the stream parameters dictated by the server
// (float32, non-interleaved, server buffer size and rate), allocate the
// JackHandle plus user/device buffers, install the process/xrun/
// shutdown callbacks and register one Jack port per user channel.
// Returns SUCCESS/FAILURE; the tail lines are the shared error-cleanup
// path (reached by goto in the upstream source).
// NOTE(review): original numbering is non-contiguous throughout this
// function — `ports`/`iColon` declarations, do-loop headers, goto
// labels, `return FAILURE;` statements, free(ports) calls and closing
// braces are not visible in this listing; verify against upstream.
2210 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2211 unsigned int firstChannel, unsigned int sampleRate,
2212 RtAudioFormat format, unsigned int *bufferSize,
2213 RtAudio::StreamOptions *options )
2215 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2217 // Look for jack server and try to become a client (only do once per stream).
2218 jack_client_t *client = 0;
2219 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2220 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2221 jack_status_t *status = NULL;
// Honor a user-supplied client name when given.
2222 if ( options && !options->streamName.empty() )
2223 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2225 client = jack_client_open( "RtApiJack", jackoptions, status );
2226 if ( client == 0 ) {
2227 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2228 error( RtAudioError::WARNING );
2233 // The handle must have been created on an earlier pass.
2234 client = handle->client;
// Resolve the device index to a client-name prefix, as in
// getDeviceCount()/getDeviceInfo().
2238 std::string port, previousPort, deviceName;
2239 unsigned int nPorts = 0, nDevices = 0;
2240 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
2242 // Parse the port names up to the first colon (:).
2245 port = (char *) ports[ nPorts ];
2246 iColon = port.find(":");
2247 if ( iColon != std::string::npos ) {
2248 port = port.substr( 0, iColon );
2249 if ( port != previousPort ) {
2250 if ( nDevices == device ) deviceName = port;
2252 previousPort = port;
2255 } while ( ports[++nPorts] );
2259 if ( device >= nDevices ) {
2260 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
// Direction flip: our OUTPUT connects to Jack *input* ports.
2264 unsigned long flag = JackPortIsInput;
2265 if ( mode == INPUT ) flag = JackPortIsOutput;
2267 if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
2268 // Count the available ports containing the client name as device
2269 // channels. Jack "input ports" equal RtAudio output channels.
2270 unsigned int nChannels = 0;
2271 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2273 while ( ports[ nChannels ] ) nChannels++;
2276 // Compare the jack ports for specified client to the requested number of channels.
2277 if ( nChannels < (channels + firstChannel) ) {
2278 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2279 errorText_ = errorStream_.str();
2284 // Check the jack server sample rate.
2285 unsigned int jackRate = jack_get_sample_rate( client );
2286 if ( sampleRate != jackRate ) {
2287 jack_client_close( client );
2288 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2289 errorText_ = errorStream_.str();
2292 stream_.sampleRate = jackRate;
2294 // Get the latency of the JACK port.
2295 ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
2296 if ( ports[ firstChannel ] ) {
2298 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2299 // the range (usually the min and max are equal)
2300 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2301 // get the latency range
2302 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2303 // be optimistic, use the min!
2304 stream_.latency[mode] = latrange.min;
2305 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2309 // The jack server always uses 32-bit floating-point data.
2310 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2311 stream_.userFormat = format;
2313 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2314 else stream_.userInterleaved = true;
2316 // Jack always uses non-interleaved buffers.
2317 stream_.deviceInterleaved[mode] = false;
2319 // Jack always provides host byte-ordered data.
2320 stream_.doByteSwap[mode] = false;
2322 // Get the buffer size. The buffer size and number of buffers
2323 // (periods) is set when the jack server is started.
2324 stream_.bufferSize = (int) jack_get_buffer_size( client );
2325 *bufferSize = stream_.bufferSize;
2327 stream_.nDeviceChannels[mode] = channels;
2328 stream_.nUserChannels[mode] = channels;
2330 // Set flags for buffer conversion.
2331 stream_.doConvertBuffer[mode] = false;
2332 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2333 stream_.doConvertBuffer[mode] = true;
2334 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2335 stream_.nUserChannels[mode] > 1 )
2336 stream_.doConvertBuffer[mode] = true;
2338 // Allocate our JackHandle structure for the stream.
2339 if ( handle == 0 ) {
2341 handle = new JackHandle;
2343 catch ( std::bad_alloc& ) {
2344 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2348 if ( pthread_cond_init(&handle->condition, NULL) ) {
2349 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2352 stream_.apiHandle = (void *) handle;
2353 handle->client = client;
2355 handle->deviceName[mode] = deviceName;
2357 // Allocate necessary internal buffers.
2358 unsigned long bufferBytes;
2359 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2360 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2361 if ( stream_.userBuffer[mode] == NULL ) {
2362 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2366 if ( stream_.doConvertBuffer[mode] ) {
// A device buffer is needed for format/interleaving conversion; for
// duplex it is shared, so keep the larger of the two sizes.
2368 bool makeBuffer = true;
2369 if ( mode == OUTPUT )
2370 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2371 else { // mode == INPUT
2372 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2373 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2374 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2375 if ( bufferBytes < bytesOut ) makeBuffer = false;
2380 bufferBytes *= *bufferSize;
2381 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2382 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2383 if ( stream_.deviceBuffer == NULL ) {
2384 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2390 // Allocate memory for the Jack ports (channels) identifiers.
2391 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2392 if ( handle->ports[mode] == NULL ) {
2393 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2397 stream_.device[mode] = device;
2398 stream_.channelOffset[mode] = firstChannel;
2399 stream_.state = STREAM_STOPPED;
2400 stream_.callbackInfo.object = (void *) this;
2402 if ( stream_.mode == OUTPUT && mode == INPUT )
2403 // We had already set up the stream for output.
2404 stream_.mode = DUPLEX;
2406 stream_.mode = mode;
// Callbacks are installed once per client (first pass only, upstream).
2407 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2408 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2409 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2412 // Register our ports.
2414 if ( mode == OUTPUT ) {
2415 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2416 snprintf( label, 64, "outport %d", i );
2417 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2418 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2422 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2423 snprintf( label, 64, "inport %d", i );
2424 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2425 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2429 // Setup the buffer conversion information structure. We don't use
2430 // buffers to do channel offsets, so we override that parameter
2432 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2434 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// ---- shared error-cleanup path (goto target in upstream) ----
// Release everything allocated above in reverse order.
2440 pthread_cond_destroy( &handle->condition );
2441 jack_client_close( handle->client );
2443 if ( handle->ports[0] ) free( handle->ports[0] );
2444 if ( handle->ports[1] ) free( handle->ports[1] );
2447 stream_.apiHandle = 0;
2450 for ( int i=0; i<2; i++ ) {
2451 if ( stream_.userBuffer[i] ) {
2452 free( stream_.userBuffer[i] );
2453 stream_.userBuffer[i] = 0;
2457 if ( stream_.deviceBuffer ) {
2458 free( stream_.deviceBuffer );
2459 stream_.deviceBuffer = 0;
// Close the stream: deactivate the client if still running, close the
// Jack client, and free the JackHandle, port arrays, condition
// variable, and user/device buffers.  Resets mode/state bookkeeping.
// NOTE(review): non-contiguous numbering — null-checks around `handle`
// and closing braces are not visible in this listing.
2465 void RtApiJack :: closeStream( void )
2467 if ( stream_.state == STREAM_CLOSED ) {
2468 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2469 error( RtAudioError::WARNING );
2473 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2476 if ( stream_.state == STREAM_RUNNING )
2477 jack_deactivate( handle->client );
2479 jack_client_close( handle->client );
// Free per-direction port arrays and the drain condition variable.
2483 if ( handle->ports[0] ) free( handle->ports[0] );
2484 if ( handle->ports[1] ) free( handle->ports[1] );
2485 pthread_cond_destroy( &handle->condition );
2487 stream_.apiHandle = 0;
2490 for ( int i=0; i<2; i++ ) {
2491 if ( stream_.userBuffer[i] ) {
2492 free( stream_.userBuffer[i] );
2493 stream_.userBuffer[i] = 0;
2497 if ( stream_.deviceBuffer ) {
2498 free( stream_.deviceBuffer );
2499 stream_.deviceBuffer = 0;
2502 stream_.mode = UNINITIALIZED;
2503 stream_.state = STREAM_CLOSED;
// Start the stream: activate the Jack client and, unless auto-connect
// was disabled, wire our registered ports to the device's ports
// (honoring channelOffset).  Resets drain bookkeeping and marks the
// stream RUNNING; raises SYSTEM_ERROR via the `unlock:` tail when any
// Jack call failed.
// NOTE(review): non-contiguous numbering — `const char **ports;`
// declarations, result checks/goto unlock, jack_free(ports) calls,
// the unlock label and closing braces are not visible in this listing.
2506 void RtApiJack :: startStream( void )
2509 RtApi::startStream();
2510 if ( stream_.state == STREAM_RUNNING ) {
2511 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2512 error( RtAudioError::WARNING );
// Seed the stream-time tick timestamp.
2516 #if defined( HAVE_GETTIMEOFDAY )
2517 gettimeofday( &stream_.lastTickTimestamp, NULL );
2520 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2521 int result = jack_activate( handle->client );
2523 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2529 // Get the list of available ports.
2530 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2532 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
2533 if ( ports == NULL) {
2534 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2538 // Now make the port connections. Since RtAudio wasn't designed to
2539 // allow the user to select particular channels of a device, we'll
2540 // just open the first "nChannels" ports with offset.
2541 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2543 if ( ports[ stream_.channelOffset[0] + i ] )
2544 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2547 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
// Input side: device's Jack *output* ports feed our registered inputs.
2554 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2556 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
2557 if ( ports == NULL) {
2558 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2562 // Now make the port connections. See note above.
2563 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2565 if ( ports[ stream_.channelOffset[1] + i ] )
2566 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2569 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2576 handle->drainCounter = 0;
2577 handle->internalDrain = false;
2578 stream_.state = STREAM_RUNNING;
// unlock tail: success returns silently; otherwise raise.
2581 if ( result == 0 ) return;
2582 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream.  For output/duplex streams that are not already
// draining, request a two-callback drain (zeros are written by
// callbackEvent) and block on the condition variable until the
// callback signals completion; then deactivate the Jack client.
// NOTE(review): non-contiguous numbering — the mutex lock around the
// pthread_cond_wait and closing braces are not visible in this listing.
2585 void RtApiJack :: stopStream( void )
2588 if ( stream_.state == STREAM_STOPPED ) {
2589 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2590 error( RtAudioError::WARNING );
2594 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2595 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2597 if ( handle->drainCounter == 0 ) {
2598 handle->drainCounter = 2;
2599 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2603 jack_deactivate( handle->client );
2604 stream_.state = STREAM_STOPPED;
// Abort the stream: skip the output drain by pre-setting drainCounter
// to 2 so callbackEvent() writes silence immediately, then stop.
// NOTE(review): non-contiguous numbering — the trailing call to
// stopStream() (present upstream) is not visible in this listing.
2607 void RtApiJack :: abortStream( void )
2610 if ( stream_.state == STREAM_STOPPED ) {
2611 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2612 error( RtAudioError::WARNING );
2616 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2617 handle->drainCounter = 2;
2622 // This function will be called by a spawned thread when the user
2623 // callback function signals that the stream should be stopped or
2624 // aborted. It is necessary to handle it this way because the
2625 // callbackEvent() function must return before the jack_deactivate()
2626 // function will return.
2627 static void *jackStopStream( void *ptr )
2629 CallbackInfo *info = (CallbackInfo *) ptr;
2630 RtApiJack *object = (RtApiJack *) info->object;
2632 object->stopStream();
2633 pthread_exit( NULL );
// Per-block Jack processing: run the user callback (reporting any xrun
// flags), honor its stop/drain return codes, then copy/convert audio
// between the user/device buffers and each channel's Jack port buffer.
// Returns SUCCESS (callback stays installed) or FAILURE.
// NOTE(review): non-contiguous numbering — closing braces, `return
// FAILURE/SUCCESS`, the ThreadHandle `id` declaration (line 2683ish)
// and blank lines are not visible in this listing.
// NOTE(review): the two error strings below say "RtApiCore::..." inside
// RtApiJack — a copy-paste defect in the message text; a doc-only pass
// cannot change runtime strings, so flagging for a code fix.
2636 bool RtApiJack :: callbackEvent( unsigned long nframes )
2638 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2639 if ( stream_.state == STREAM_CLOSED ) {
2640 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2641 error( RtAudioError::WARNING );
// Jack fixed the buffer size at open; a mismatch is unrecoverable here.
2644 if ( stream_.bufferSize != nframes ) {
2645 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2646 error( RtAudioError::WARNING );
2650 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2651 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2653 // Check if we were draining the stream and signal is finished.
2654 if ( handle->drainCounter > 3 ) {
2655 ThreadHandle threadId;
2657 stream_.state = STREAM_STOPPING;
// Internal drain (callback asked to stop): spawn the stop thread;
// external drain (stopStream is blocked): wake it via the condition.
2658 if ( handle->internalDrain == true )
2659 pthread_create( &threadId, NULL, jackStopStream, info );
2661 pthread_cond_signal( &handle->condition );
2665 // Invoke user callback first, to get fresh output data.
2666 if ( handle->drainCounter == 0 ) {
2667 RtAudioCallback callback = (RtAudioCallback) info->callback;
2668 double streamTime = getStreamTime();
2669 RtAudioStreamStatus status = 0;
2670 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2671 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2672 handle->xrun[0] = false;
2674 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2675 status |= RTAUDIO_INPUT_OVERFLOW;
2676 handle->xrun[1] = false;
2678 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2679 stream_.bufferSize, streamTime, status, info->userData );
// Return code 2 = abort now; 1 = stop after draining output.
2680 if ( cbReturnValue == 2 ) {
2681 stream_.state = STREAM_STOPPING;
2682 handle->drainCounter = 2;
2684 pthread_create( &id, NULL, jackStopStream, info );
2687 else if ( cbReturnValue == 1 ) {
2688 handle->drainCounter = 1;
2689 handle->internalDrain = true;
// ---- output side: user/device buffer -> per-channel Jack buffers ----
2693 jack_default_audio_sample_t *jackbuffer;
2694 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2695 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2697 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2699 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2700 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2701 memset( jackbuffer, 0, bufferBytes );
2705 else if ( stream_.doConvertBuffer[0] ) {
2707 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2709 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2710 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2711 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2714 else { // no buffer conversion
2715 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2716 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2717 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2722 // Don't bother draining input
2723 if ( handle->drainCounter ) {
2724 handle->drainCounter++;
// ---- input side: per-channel Jack buffers -> user/device buffer ----
2728 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2730 if ( stream_.doConvertBuffer[1] ) {
2731 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2732 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2733 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2735 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2737 else { // no buffer conversion
2738 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2739 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2740 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2746 RtApi::tickStreamTime();
2749 //******************** End of __UNIX_JACK__ *********************//
2752 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2754 // The ASIO API is designed around a callback scheme, so this
2755 // implementation is similar to that used for OS-X CoreAudio and Linux
2756 // Jack. The primary constraint with ASIO is that it only allows
2757 // access to a single driver at a time. Thus, it is not possible to
2758 // have more than one simultaneous RtAudio stream.
2760 // This implementation also requires a number of external ASIO files
2761 // and a few global variables. The ASIO callback scheme does not
2762 // allow for the passing of user data, so we must create a global
2763 // pointer to our callbackInfo structure.
2765 // On unix systems, we make use of a pthread condition variable.
2766 // Since there is no equivalent in Windows, I hacked something based
2767 // on information found in
2768 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2770 #include "asiosys.h"
2772 #include "iasiothiscallresolver.h"
2773 #include "asiodrivers.h"
2776 static AsioDrivers drivers;
2777 static ASIOCallbacks asioCallbacks;
2778 static ASIODriverInfo driverInfo;
2779 static CallbackInfo *asioCallbackInfo;
2780 static bool asioXRun;
2783 int drainCounter; // Tracks callback counts when draining
2784 bool internalDrain; // Indicates if stop is initiated from callback or not.
2785 ASIOBufferInfo *bufferInfos;
2789 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2792 // Function declarations (definitions at end of section)
2793 static const char* getAsioErrorString( ASIOError result );
2794 static void sampleRateChanged( ASIOSampleRate sRate );
2795 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM in single-threaded apartment mode (ASIO
// requirement), reset the static driver list, and pre-fill the
// ASIODriverInfo used by later ASIOInit() calls.
// NOTE(review): non-contiguous numbering — the opening brace and the
// `if ( FAILED(hr) )` line guarding the warning are not visible in
// this listing.  ("appartment"/"CoInitilialize" typos are upstream's.)
2797 RtApiAsio :: RtApiAsio()
2799 // ASIO cannot run on a multi-threaded appartment. You can call
2800 // CoInitialize beforehand, but it must be for appartment threading
2801 // (in which case, CoInitilialize will return S_FALSE here).
2802 coInitialized_ = false;
2803 HRESULT hr = CoInitialize( NULL );
2805 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2806 error( RtAudioError::WARNING );
2808 coInitialized_ = true;
2810 drivers.removeCurrentDriver();
2811 driverInfo.asioVersion = 2;
2813 // See note in DirectSound implementation about GetDesktopWindow().
2814 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then balance the constructor's
// CoInitialize only if it actually succeeded there.
2817 RtApiAsio :: ~RtApiAsio()
2819 if ( stream_.state != STREAM_CLOSED ) closeStream();
2820 if ( coInitialized_ ) CoUninitialize();
2823 unsigned int RtApiAsio :: getDeviceCount( void )
2825 return (unsigned int) drivers.asioGetNumDev();
2828 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
// Probe a single ASIO device and return its capabilities.
// Temporarily loads the driver to query channel counts, supported sample
// rates, and the native data format, then unloads it.  While a stream is
// open, ASIO cannot probe other drivers, so the results cached by
// saveDeviceInfo() are returned instead.  Failures are reported through
// error() with info.probed left false.
2830 RtAudio::DeviceInfo info;
2831 info.probed = false;
2834 unsigned int nDevices = getDeviceCount();
2835 if ( nDevices == 0 ) {
2836 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2837 error( RtAudioError::INVALID_USE );
2841 if ( device >= nDevices ) {
2842 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2843 error( RtAudioError::INVALID_USE );
// If a stream is open we cannot load another driver; serve the cached copy.
2847 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2848 if ( stream_.state != STREAM_CLOSED ) {
2849 if ( device >= devices_.size() ) {
2850 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2851 error( RtAudioError::WARNING );
2854 return devices_[ device ];
2857 char driverName[32];
2858 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2859 if ( result != ASE_OK ) {
2860 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2861 errorText_ = errorStream_.str();
2862 error( RtAudioError::WARNING );
2866 info.name = driverName;
2868 if ( !drivers.loadDriver( driverName ) ) {
2869 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2870 errorText_ = errorStream_.str();
2871 error( RtAudioError::WARNING );
2875 result = ASIOInit( &driverInfo );
2876 if ( result != ASE_OK ) {
2877 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2878 errorText_ = errorStream_.str();
2879 error( RtAudioError::WARNING );
2883 // Determine the device channel information.
2884 long inputChannels, outputChannels;
2885 result = ASIOGetChannels( &inputChannels, &outputChannels );
2886 if ( result != ASE_OK ) {
2887 drivers.removeCurrentDriver();
2888 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2889 errorText_ = errorStream_.str();
2890 error( RtAudioError::WARNING );
2894 info.outputChannels = outputChannels;
2895 info.inputChannels = inputChannels;
// Duplex capability is the smaller of the two directional channel counts.
2896 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2897 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2899 // Determine the supported sample rates.
2900 info.sampleRates.clear();
2901 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2902 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2903 if ( result == ASE_OK ) {
2904 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate: the highest supported rate that does not exceed 48 kHz
// (or the first supported rate found, if none has been chosen yet).
2906 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2907 info.preferredSampleRate = SAMPLE_RATES[i];
2911 // Determine supported data types ... just check first channel and assume rest are the same.
2912 ASIOChannelInfo channelInfo;
2913 channelInfo.channel = 0;
2914 channelInfo.isInput = true;
2915 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2916 result = ASIOGetChannelInfo( &channelInfo );
2917 if ( result != ASE_OK ) {
2918 drivers.removeCurrentDriver();
2919 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2920 errorText_ = errorStream_.str();
2921 error( RtAudioError::WARNING );
// Map the ASIO sample type onto the RtAudio format bit-mask.
2925 info.nativeFormats = 0;
2926 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2927 info.nativeFormats |= RTAUDIO_SINT16;
2928 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2929 info.nativeFormats |= RTAUDIO_SINT32;
2930 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2931 info.nativeFormats |= RTAUDIO_FLOAT32;
2932 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2933 info.nativeFormats |= RTAUDIO_FLOAT64;
2934 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2935 info.nativeFormats |= RTAUDIO_SINT24;
2937 if ( info.outputChannels > 0 )
2938 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2939 if ( info.inputChannels > 0 )
2940 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// Unload the driver again before returning; probing must leave no driver loaded.
2943 drivers.removeCurrentDriver();
// ASIO driver callback: forwards every buffer-switch notification to the
// RtApiAsio instance stored in the global asioCallbackInfo structure.
2947 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2949 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2950 object->callbackEvent( index );
2953 void RtApiAsio :: saveDeviceInfo( void )
// Cache DeviceInfo for every device so getDeviceInfo() can answer from the
// cache while a stream is open (ASIO cannot probe drivers at that point).
2957 unsigned int nDevices = getDeviceCount();
2958 devices_.resize( nDevices );
2959 for ( unsigned int i=0; i<nDevices; i++ )
2960 devices_[i] = getDeviceInfo( i );
2963 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2964 unsigned int firstChannel, unsigned int sampleRate,
2965 RtAudioFormat format, unsigned int *bufferSize,
2966 RtAudio::StreamOptions *options )
2967 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Probe and open an ASIO device for one direction of a stream.
//   device       : index into the system driver list
//   mode         : OUTPUT or INPUT; calling with INPUT after OUTPUT on the
//                  same device sets up a duplex stream (same driver required)
//   channels     : requested channel count; firstChannel : channel offset
//   sampleRate   : requested rate (set on the device only if it differs)
//   format       : user sample format; bufferSize : in/out frames per buffer
//   options      : optional flags (e.g. RTAUDIO_NONINTERLEAVED)
// Returns true on success; on failure sets errorText_ and (for the
// non-duplex-input case) unwinds driver/buffer/handle state before returning.
2969 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2971 // For ASIO, a duplex stream MUST use the same driver.
2972 if ( isDuplexInput && stream_.device[0] != device ) {
2973 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2977 char driverName[32];
2978 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2979 if ( result != ASE_OK ) {
2980 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2981 errorText_ = errorStream_.str();
2985 // Only load the driver once for duplex stream.
2986 if ( !isDuplexInput ) {
2987 // The getDeviceInfo() function will not work when a stream is open
2988 // because ASIO does not allow multiple devices to run at the same
2989 // time. Thus, we'll probe the system before opening a stream and
2990 // save the results for use by getDeviceInfo().
2991 this->saveDeviceInfo();
2993 if ( !drivers.loadDriver( driverName ) ) {
2994 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2995 errorText_ = errorStream_.str();
2999 result = ASIOInit( &driverInfo );
3000 if ( result != ASE_OK ) {
3001 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
3002 errorText_ = errorStream_.str();
3007 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
3008 bool buffersAllocated = false;
3009 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3010 unsigned int nChannels;
3013 // Check the device channel count.
3014 long inputChannels, outputChannels;
3015 result = ASIOGetChannels( &inputChannels, &outputChannels );
3016 if ( result != ASE_OK ) {
3017 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
3018 errorText_ = errorStream_.str();
3022 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3023 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3024 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3025 errorText_ = errorStream_.str();
3028 stream_.nDeviceChannels[mode] = channels;
3029 stream_.nUserChannels[mode] = channels;
3030 stream_.channelOffset[mode] = firstChannel;
3032 // Verify the sample rate is supported.
3033 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3034 if ( result != ASE_OK ) {
3035 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3036 errorText_ = errorStream_.str();
3040 // Get the current sample rate
3041 ASIOSampleRate currentRate;
// FIX: was "ASIOGetSampleRate( ¤tRate )" -- a character-encoding
// corruption of "&currentRate" ("&curren" decoded as the currency sign).
// ASIOGetSampleRate() requires the address of the variable.
3042 result = ASIOGetSampleRate( &currentRate );
3043 if ( result != ASE_OK ) {
3044 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3045 errorText_ = errorStream_.str();
3049 // Set the sample rate only if necessary
3050 if ( currentRate != sampleRate ) {
3051 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3052 if ( result != ASE_OK ) {
3053 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3054 errorText_ = errorStream_.str();
3059 // Determine the driver data type.
3060 ASIOChannelInfo channelInfo;
3061 channelInfo.channel = 0;
3062 if ( mode == OUTPUT ) channelInfo.isInput = false;
3063 else channelInfo.isInput = true;
3064 result = ASIOGetChannelInfo( &channelInfo );
3065 if ( result != ASE_OK ) {
3066 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3067 errorText_ = errorStream_.str();
3071 // Assuming WINDOWS host is always little-endian.
3072 stream_.doByteSwap[mode] = false;
3073 stream_.userFormat = format;
3074 stream_.deviceFormat[mode] = 0;
3075 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3076 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3077 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3079 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3080 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3081 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3083 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3084 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3085 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3087 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3088 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3089 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3091 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3092 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3093 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3096 if ( stream_.deviceFormat[mode] == 0 ) {
3097 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3098 errorText_ = errorStream_.str();
3102 // Set the buffer size. For a duplex stream, this will end up
3103 // setting the buffer size based on the input constraints, which
3105 long minSize, maxSize, preferSize, granularity;
3106 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3107 if ( result != ASE_OK ) {
3108 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3109 errorText_ = errorStream_.str();
3113 if ( isDuplexInput ) {
3114 // When this is the duplex input (output was opened before), then we have to use the same
3115 // buffersize as the output, because it might use the preferred buffer size, which most
3116 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3117 // So instead of throwing an error, make them equal. The caller uses the reference
3118 // to the "bufferSize" param as usual to set up processing buffers.
3120 *bufferSize = stream_.bufferSize;
// Clamp the requested size to the driver limits, then adjust to the
// driver's granularity constraints (power of two when granularity == -1).
3123 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3124 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3125 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3126 else if ( granularity == -1 ) {
3127 // Make sure bufferSize is a power of two.
3128 int log2_of_min_size = 0;
3129 int log2_of_max_size = 0;
3131 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3132 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3133 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two in [2^log2_min, 2^log2_max] closest to the request.
3136 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3137 int min_delta_num = log2_of_min_size;
3139 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3140 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3141 if (current_delta < min_delta) {
3142 min_delta = current_delta;
3147 *bufferSize = ( (unsigned int)1 << min_delta_num );
3148 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3149 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3151 else if ( granularity != 0 ) {
3152 // Set to an even multiple of granularity, rounding up.
3153 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3158 // we don't use it anymore, see above!
3159 // Just left it here for the case...
3160 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3161 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3166 stream_.bufferSize = *bufferSize;
3167 stream_.nBuffers = 2;
3169 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3170 else stream_.userInterleaved = true;
3172 // ASIO always uses non-interleaved buffers.
3173 stream_.deviceInterleaved[mode] = false;
3175 // Allocate, if necessary, our AsioHandle structure for the stream.
3176 if ( handle == 0 ) {
3178 handle = new AsioHandle;
3180 catch ( std::bad_alloc& ) {
3181 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3184 handle->bufferInfos = 0;
3186 // Create a manual-reset event.
3187 handle->condition = CreateEvent( NULL, // no security
3188 TRUE, // manual-reset
3189 FALSE, // non-signaled initially
3191 stream_.apiHandle = (void *) handle;
3194 // Create the ASIO internal buffers. Since RtAudio sets up input
3195 // and output separately, we'll have to dispose of previously
3196 // created output buffers for a duplex stream.
3197 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3198 ASIODisposeBuffers();
3199 if ( handle->bufferInfos ) free( handle->bufferInfos );
3202 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3204 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3205 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3206 if ( handle->bufferInfos == NULL ) {
3207 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3208 errorText_ = errorStream_.str();
// Output channel descriptors come first, then input channels.
3212 ASIOBufferInfo *infos;
3213 infos = handle->bufferInfos;
3214 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3215 infos->isInput = ASIOFalse;
3216 infos->channelNum = i + stream_.channelOffset[0];
3217 infos->buffers[0] = infos->buffers[1] = 0;
3219 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3220 infos->isInput = ASIOTrue;
3221 infos->channelNum = i + stream_.channelOffset[1];
3222 infos->buffers[0] = infos->buffers[1] = 0;
3225 // prepare for callbacks
3226 stream_.sampleRate = sampleRate;
3227 stream_.device[mode] = device;
3228 stream_.mode = isDuplexInput ? DUPLEX : mode;
3230 // store this class instance before registering callbacks, that are going to use it
3231 asioCallbackInfo = &stream_.callbackInfo;
3232 stream_.callbackInfo.object = (void *) this;
3234 // Set up the ASIO callback structure and create the ASIO data buffers.
3235 asioCallbacks.bufferSwitch = &bufferSwitch;
3236 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3237 asioCallbacks.asioMessage = &asioMessages;
3238 asioCallbacks.bufferSwitchTimeInfo = NULL;
3239 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3240 if ( result != ASE_OK ) {
3241 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3242 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers (e.g. Creative's ASIO driver).
3243 // In that case, let's be naïve and try that instead.
3244 *bufferSize = preferSize;
3245 stream_.bufferSize = *bufferSize;
3246 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3249 if ( result != ASE_OK ) {
3250 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3251 errorText_ = errorStream_.str();
3254 buffersAllocated = true;
3255 stream_.state = STREAM_STOPPED;
3257 // Set flags for buffer conversion.
3258 stream_.doConvertBuffer[mode] = false;
3259 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3260 stream_.doConvertBuffer[mode] = true;
3261 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3262 stream_.nUserChannels[mode] > 1 )
3263 stream_.doConvertBuffer[mode] = true;
3265 // Allocate necessary internal buffers
3266 unsigned long bufferBytes;
3267 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3268 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3269 if ( stream_.userBuffer[mode] == NULL ) {
3270 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3274 if ( stream_.doConvertBuffer[mode] ) {
// Reuse the existing device buffer for duplex input if it is already
// large enough for this direction's requirements.
3276 bool makeBuffer = true;
3277 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3278 if ( isDuplexInput && stream_.deviceBuffer ) {
3279 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3280 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3284 bufferBytes *= *bufferSize;
3285 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3286 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3287 if ( stream_.deviceBuffer == NULL ) {
3288 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3294 // Determine device latencies
3295 long inputLatency, outputLatency;
3296 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3297 if ( result != ASE_OK ) {
3298 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3299 errorText_ = errorStream_.str();
3300 error( RtAudioError::WARNING); // warn but don't fail
3303 stream_.latency[0] = outputLatency;
3304 stream_.latency[1] = inputLatency;
3307 // Setup the buffer conversion information structure. We don't use
3308 // buffers to do channel offsets, so we override that parameter
3310 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error unwinding: only reached via the (elided) error labels above.
3315 if ( !isDuplexInput ) {
3316 // the cleanup for error in the duplex input, is done by RtApi::openStream
3317 // So we clean up for single channel only
3319 if ( buffersAllocated )
3320 ASIODisposeBuffers();
3322 drivers.removeCurrentDriver();
3325 CloseHandle( handle->condition );
3326 if ( handle->bufferInfos )
3327 free( handle->bufferInfos );
3330 stream_.apiHandle = 0;
3334 if ( stream_.userBuffer[mode] ) {
3335 free( stream_.userBuffer[mode] );
3336 stream_.userBuffer[mode] = 0;
3339 if ( stream_.deviceBuffer ) {
3340 free( stream_.deviceBuffer );
3341 stream_.deviceBuffer = 0;
3346 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3348 void RtApiAsio :: closeStream()
// Tear down the open stream: stop if running, dispose the ASIO buffers,
// unload the driver, and free the AsioHandle and all user/device buffers.
// Warns (but does nothing else) if no stream is open.
3350 if ( stream_.state == STREAM_CLOSED ) {
3351 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3352 error( RtAudioError::WARNING );
3356 if ( stream_.state == STREAM_RUNNING ) {
3357 stream_.state = STREAM_STOPPED;
3360 ASIODisposeBuffers();
3361 drivers.removeCurrentDriver();
3363 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3365 CloseHandle( handle->condition );
3366 if ( handle->bufferInfos )
3367 free( handle->bufferInfos );
3369 stream_.apiHandle = 0;
// Release both the output ([0]) and input ([1]) user buffers.
3372 for ( int i=0; i<2; i++ ) {
3373 if ( stream_.userBuffer[i] ) {
3374 free( stream_.userBuffer[i] );
3375 stream_.userBuffer[i] = 0;
3379 if ( stream_.deviceBuffer ) {
3380 free( stream_.deviceBuffer );
3381 stream_.deviceBuffer = 0;
3384 stream_.mode = UNINITIALIZED;
3385 stream_.state = STREAM_CLOSED;
// File-scope flag cleared by RtApiAsio::startStream() below.
// NOTE(review): the code that sets it to true is not visible in this chunk;
// presumably set when the asioStopStream helper thread runs -- confirm
// against the full file.
3388 bool stopThreadCalled = false;
3390 void RtApiAsio :: startStream()
// Start the ASIO device: reset the drain bookkeeping and the stop-signal
// event, then mark the stream RUNNING.  Warns if already running; raises
// SYSTEM_ERROR if ASIOStart() fails.
3393 RtApi::startStream();
3394 if ( stream_.state == STREAM_RUNNING ) {
3395 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3396 error( RtAudioError::WARNING );
3400 #if defined( HAVE_GETTIMEOFDAY )
3401 gettimeofday( &stream_.lastTickTimestamp, NULL );
3404 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3405 ASIOError result = ASIOStart();
3406 if ( result != ASE_OK ) {
3407 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3408 errorText_ = errorStream_.str();
3412 handle->drainCounter = 0;
3413 handle->internalDrain = false;
3414 ResetEvent( handle->condition );
3415 stream_.state = STREAM_RUNNING;
3419 stopThreadCalled = false;
3421 if ( result == ASE_OK ) return;
3422 error( RtAudioError::SYSTEM_ERROR );
3425 void RtApiAsio :: stopStream()
// Stop the ASIO device.  For output/duplex streams that are not already
// draining, request a drain (drainCounter = 2) and block on the handle's
// event until callbackEvent() signals that the last buffers were played.
3428 if ( stream_.state == STREAM_STOPPED ) {
3429 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3430 error( RtAudioError::WARNING );
3434 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3435 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3436 if ( handle->drainCounter == 0 ) {
3437 handle->drainCounter = 2;
3438 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3442 stream_.state = STREAM_STOPPED;
3444 ASIOError result = ASIOStop();
3445 if ( result != ASE_OK ) {
3446 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3447 errorText_ = errorStream_.str();
3450 if ( result == ASE_OK ) return;
3451 error( RtAudioError::SYSTEM_ERROR );
3454 void RtApiAsio :: abortStream()
// Abort the stream.  Historically this skipped the drain, but (per the
// comment below) that caused residual sound, so abort now behaves exactly
// like stopStream().
3457 if ( stream_.state == STREAM_STOPPED ) {
3458 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3459 error( RtAudioError::WARNING );
3463 // The following lines were commented-out because some behavior was
3464 // noted where the device buffers need to be zeroed to avoid
3465 // continuing sound, even when the device buffers are completely
3466 // disposed. So now, calling abort is the same as calling stop.
3467 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3468 // handle->drainCounter = 2;
3472 // This function will be called by a spawned thread when the user
3473 // callback function signals that the stream should be stopped or
3474 // aborted. It is necessary to handle it this way because the
3475 // callbackEvent() function must return before the ASIOStop()
3476 // function will return.
3477 static unsigned __stdcall asioStopStream( void *ptr )
// Thread entry point: recover the RtApiAsio instance from the CallbackInfo
// passed as the thread argument and invoke its stopStream().
3479 CallbackInfo *info = (CallbackInfo *) ptr;
3480 RtApiAsio *object = (RtApiAsio *) info->object;
3482 object->stopStream();
3487 bool RtApiAsio :: callbackEvent( long bufferIndex )
// Per-buffer processing, invoked from the driver's bufferSwitch() callback.
// Handles drain completion signaling, invokes the user callback, converts /
// byte-swaps / copies audio between the user buffers and the driver's
// non-interleaved per-channel buffers (half-buffer selected by bufferIndex),
// and ticks the stream time.
3489 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3490 if ( stream_.state == STREAM_CLOSED ) {
3491 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3492 error( RtAudioError::WARNING );
3496 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3497 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3499 // Check if we were draining the stream and signal if finished.
3500 if ( handle->drainCounter > 3 ) {
3502 stream_.state = STREAM_STOPPING;
3503 if ( handle->internalDrain == false )
// An external stopStream() is blocked on this event -- wake it.
3504 SetEvent( handle->condition );
3505 else { // spawn a thread to stop the stream
3507 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3508 &stream_.callbackInfo, 0, &threadId );
3513 // Invoke user callback to get fresh output data UNLESS we are
3515 if ( handle->drainCounter == 0 ) {
3516 RtAudioCallback callback = (RtAudioCallback) info->callback;
3517 double streamTime = getStreamTime();
3518 RtAudioStreamStatus status = 0;
3519 if ( stream_.mode != INPUT && asioXRun == true ) {
3520 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3523 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3524 status |= RTAUDIO_INPUT_OVERFLOW;
3527 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3528 stream_.bufferSize, streamTime, status, info->userData );
// Callback return of 2 = abort immediately (stop via helper thread);
// 1 = drain remaining output, then stop internally.
3529 if ( cbReturnValue == 2 ) {
3530 stream_.state = STREAM_STOPPING;
3531 handle->drainCounter = 2;
3533 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3534 &stream_.callbackInfo, 0, &threadId );
3537 else if ( cbReturnValue == 1 ) {
3538 handle->drainCounter = 1;
3539 handle->internalDrain = true;
3543 unsigned int nChannels, bufferBytes, i, j;
3544 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3545 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3547 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3549 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3551 for ( i=0, j=0; i<nChannels; i++ ) {
3552 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3553 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3557 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format -> device format, then scatter the (planar)
// device buffer into the driver's per-channel output buffers.
3559 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3560 if ( stream_.doByteSwap[0] )
3561 byteSwapBuffer( stream_.deviceBuffer,
3562 stream_.bufferSize * stream_.nDeviceChannels[0],
3563 stream_.deviceFormat[0] );
3565 for ( i=0, j=0; i<nChannels; i++ ) {
3566 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3567 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3568 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy directly from the user buffer.
3574 if ( stream_.doByteSwap[0] )
3575 byteSwapBuffer( stream_.userBuffer[0],
3576 stream_.bufferSize * stream_.nUserChannels[0],
3577 stream_.userFormat );
3579 for ( i=0, j=0; i<nChannels; i++ ) {
3580 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3581 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3582 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3588 // Don't bother draining input
3589 if ( handle->drainCounter ) {
3590 handle->drainCounter++;
3594 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3596 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3598 if (stream_.doConvertBuffer[1]) {
3600 // Always interleave ASIO input data.
3601 for ( i=0, j=0; i<nChannels; i++ ) {
3602 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3603 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3604 handle->bufferInfos[i].buffers[bufferIndex],
3608 if ( stream_.doByteSwap[1] )
3609 byteSwapBuffer( stream_.deviceBuffer,
3610 stream_.bufferSize * stream_.nDeviceChannels[1],
3611 stream_.deviceFormat[1] );
3612 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3616 for ( i=0, j=0; i<nChannels; i++ ) {
3617 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3618 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3619 handle->bufferInfos[i].buffers[bufferIndex],
3624 if ( stream_.doByteSwap[1] )
3625 byteSwapBuffer( stream_.userBuffer[1],
3626 stream_.bufferSize * stream_.nUserChannels[1],
3627 stream_.userFormat );
3632 // The following call was suggested by Malte Clasen. While the API
3633 // documentation indicates it should not be required, some device
3634 // drivers apparently do not function correctly without it.
3637 RtApi::tickStreamTime();
3641 static void sampleRateChanged( ASIOSampleRate sRate )
// ASIO callback: the driver reports that the device sample rate changed.
// We cannot continue with a mismatched rate, so stop the stream and tell
// the user on stderr.
3643 // The ASIO documentation says that this usually only happens during
3644 // external sync. Audio processing is not stopped by the driver,
3645 // actual sample rate might not have even changed, maybe only the
3646 // sample rate status of an AES/EBU or S/PDIF digital input at the
3649 RtApi *object = (RtApi *) asioCallbackInfo->object;
3651 object->stopStream();
3653 catch ( RtAudioError &exception ) {
3654 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3658 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3661 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
// ASIO host-message callback: answers driver queries about which selectors
// the host supports and reacts to reset/resync/latency notifications.
// Return values for each case are in lines elided from this view.
3665 switch( selector ) {
3666 case kAsioSelectorSupported:
3667 if ( value == kAsioResetRequest
3668 || value == kAsioEngineVersion
3669 || value == kAsioResyncRequest
3670 || value == kAsioLatenciesChanged
3671 // The following three were added for ASIO 2.0, you don't
3672 // necessarily have to support them.
3673 || value == kAsioSupportsTimeInfo
3674 || value == kAsioSupportsTimeCode
3675 || value == kAsioSupportsInputMonitor)
3678 case kAsioResetRequest:
3679 // Defer the task and perform the reset of the driver during the
3680 // next "safe" situation. You cannot reset the driver right now,
3681 // as this code is called from the driver. Reset the driver is
3682 // done by completely destruct is. I.e. ASIOStop(),
3683 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3685 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3688 case kAsioResyncRequest:
3689 // This informs the application that the driver encountered some
3690 // non-fatal data loss. It is used for synchronization purposes
3691 // of different media. Added mainly to work around the Win16Mutex
3692 // problems in Windows 95/98 with the Windows Multimedia system,
3693 // which could lose data because the Mutex was held too long by
3694 // another thread. However a driver can issue it in other
3696 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3700 case kAsioLatenciesChanged:
3701 // This will inform the host application that the drivers were
3702 // latencies changed. Beware, it this does not mean that the
3703 // buffer sizes have changed! You might need to update internal
3705 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3708 case kAsioEngineVersion:
3709 // Return the supported ASIO version of the host application. If
3710 // a host application does not implement this selector, ASIO 1.0
3711 // is assumed by the driver.
3714 case kAsioSupportsTimeInfo:
3715 // Informs the driver whether the
3716 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3717 // For compatibility with ASIO 1.0 drivers the host application
3718 // should always support the "old" bufferSwitch method, too.
3721 case kAsioSupportsTimeCode:
3722 // Informs the driver whether application is interested in time
3723 // code info. If an application does not need to know about time
3724 // code, the driver has less work to do.
3731 static const char* getAsioErrorString( ASIOError result )
// Map an ASIOError code to a human-readable message via a static lookup
// table; falls back to "Unknown error." for unrecognized codes.
// NOTE(review): the `Messages` struct declaration lies in lines elided
// from this view.
3739 static const Messages m[] =
3741 { ASE_NotPresent, "Hardware input or output is not present or available." },
3742 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3743 { ASE_InvalidParameter, "Invalid input parameter." },
3744 { ASE_InvalidMode, "Invalid mode." },
3745 { ASE_SPNotAdvancing, "Sample position not advancing." },
3746 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3747 { ASE_NoMemory, "Not enough memory to complete the request." }
3750 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3751 if ( m[i].value == result ) return m[i].message;
3753 return "Unknown error.";
3756 //******************** End of __WINDOWS_ASIO__ *********************//
3760 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3762 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3763 // - Introduces support for the Windows WASAPI API
3764 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3765 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3766 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3773 #include <mferror.h>
3775 #include <mftransform.h>
3776 #include <wmcodecdsp.h>
3778 #include <audioclient.h>
3780 #include <mmdeviceapi.h>
3781 #include <FunctionDiscoveryKeys_devpkey.h>
3783 #ifndef MF_E_TRANSFORM_NEED_MORE_INPUT
3784 #define MF_E_TRANSFORM_NEED_MORE_INPUT _HRESULT_TYPEDEF_(0xc00d6d72)
3787 #ifndef MFSTARTUP_NOSOCKET
3788 #define MFSTARTUP_NOSOCKET 0x1
3792 #pragma comment( lib, "ksuser" )
3793 #pragma comment( lib, "mfplat.lib" )
3794 #pragma comment( lib, "mfuuid.lib" )
3795 #pragma comment( lib, "wmcodecdspuuid" )
3798 //=============================================================================
3800 #define SAFE_RELEASE( objectPtr )\
3803 objectPtr->Release();\
3807 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3809 //-----------------------------------------------------------------------------
3811 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3812 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3813 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3814 // provide intermediate storage for read / write synchronization.
// sets the length of the internal ring buffer
// bufferSize is a count of samples (elements); formatBytes is the size in
// bytes of one sample of the stream's format.  calloc() zero-fills the
// storage so an initial underrun reads silence rather than garbage.
// NOTE(review): the calloc() result is not checked here — confirm callers
// can tolerate buffer_ being NULL on allocation failure.
void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
  buffer_ = ( char* ) calloc( bufferSize, formatBytes );
  // bufferSize_ is measured in samples, not bytes
  bufferSize_ = bufferSize;
// attempt to push a buffer into the ring buffer at the current "in" index
// Returns false (leaving the ring untouched) when the incoming data cannot
// fit; on success copies bufferSize samples of the given format and advances
// the "in" index.  bufferSize and both indices are counted in samples.
bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  // reject NULL input, empty input, or more data than the ring can ever hold
  if ( !buffer || // incoming buffer is NULL
       bufferSize == 0 || // incoming buffer has no data
       bufferSize > bufferSize_ ) // incoming buffer too large

  // unwrap the "out" index past the end of the ring so the overlap test
  // below works across the circular wrap-around point
  unsigned int relOutIndex = outIndex_;
  unsigned int inIndexEnd = inIndex_ + bufferSize;
  if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
    relOutIndex += bufferSize_;

  // the "IN" index CAN BEGIN at the "OUT" index
  // the "IN" index CANNOT END at the "OUT" index
  if ( inIndex_ < relOutIndex && inIndexEnd >= relOutIndex ) {
    return false; // not enough space between "in" index and "out" index

  // copy buffer from external to internal
  // fromZeroSize: samples that wrap past the end of the ring and land at
  // index 0; fromInSize: samples written at the current "in" position
  int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
  fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
  int fromInSize = bufferSize - fromZeroSize;

  // per-format copy: first the run up to the end of the ring, then the
  // wrapped remainder at the start (a zero-length memcpy is a no-op)
        memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
        memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
      case RTAUDIO_SINT16:
        memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
        memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
      case RTAUDIO_SINT24:
        memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
        memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
      case RTAUDIO_SINT32:
        memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
        memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
      case RTAUDIO_FLOAT32:
        memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
        memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
      case RTAUDIO_FLOAT64:
        memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
        memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );

  // update "in" index (modulo keeps it inside the ring)
  inIndex_ += bufferSize;
  inIndex_ %= bufferSize_;
// attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror image of pushBuffer(): returns false when bufferSize samples are not
// yet available; on success copies them to the caller's buffer and advances
// the "out" index.  Sizes and indices are counted in samples.
bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
  // reject NULL output, zero-length request, or a request larger than the ring
  if ( !buffer || // incoming buffer is NULL
       bufferSize == 0 || // incoming buffer has no data
       bufferSize > bufferSize_ ) // incoming buffer too large

  // unwrap the "in" index so the availability test works across wrap-around
  unsigned int relInIndex = inIndex_;
  unsigned int outIndexEnd = outIndex_ + bufferSize;
  if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
    relInIndex += bufferSize_;

  // the "OUT" index CANNOT BEGIN at the "IN" index
  // the "OUT" index CAN END at the "IN" index
  if ( outIndex_ <= relInIndex && outIndexEnd > relInIndex ) {
    return false; // not enough space between "out" index and "in" index

  // copy buffer from internal to external
  // fromZeroSize: samples read from the start of the ring after wrapping;
  // fromOutSize: samples read at the current "out" position
  int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
  fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
  int fromOutSize = bufferSize - fromZeroSize;

  // per-format copy: run up to the ring's end, then the wrapped remainder
        memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
        memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
      case RTAUDIO_SINT16:
        memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
        memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
      case RTAUDIO_SINT24:
        memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
        memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
      case RTAUDIO_SINT32:
        memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
        memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
      case RTAUDIO_FLOAT32:
        memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
        memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
      case RTAUDIO_FLOAT64:
        memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
        memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );

  // update "out" index (modulo keeps it inside the ring)
  outIndex_ += bufferSize;
  outIndex_ %= bufferSize_;
  unsigned int bufferSize_;  // ring capacity, in samples
  unsigned int inIndex_;     // next write position (producer side)
  unsigned int outIndex_;    // next read position (consumer side)
3970 //-----------------------------------------------------------------------------
// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
// between HW and the user. The WasapiResampler class is used to perform this conversion between
// HwIn->UserIn and UserOut->HwOut during the stream callback loop.
// Implementation wraps the Media Foundation resampler MFT
// (CLSID_CResamplerMediaObject); steps are numbered 1..8 across the
// constructor, Convert() and the destructor.
class WasapiResampler
  // isFloat selects MFAudioFormat_Float vs MFAudioFormat_PCM; all HRESULTs
  // from the setup calls below are ignored — NOTE(review): a failed
  // CoCreateInstance would leave _transform NULL; confirm callers only
  // construct this after WASAPI init has succeeded.
  WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
                   unsigned int inSampleRate, unsigned int outSampleRate )
    : _bytesPerSample( bitsPerSample / 8 )
    , _channelCount( channelCount )
    , _sampleRatio( ( float ) outSampleRate / inSampleRate )
    , _transformUnk( NULL )
    , _transform( NULL )
    , _mediaType( NULL )
    , _inputMediaType( NULL )
    , _outputMediaType( NULL )
  // IWMResamplerProps is only available when wmcodecdsp.h declared it
  #ifdef __IWMResamplerProps_FWD_DEFINED__
    , _resamplerProps( NULL )

    // 1. Initialization
    MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );

    // 2. Create Resampler Transform Object
    CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
                      IID_IUnknown, ( void** ) &_transformUnk );

    _transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );

#ifdef __IWMResamplerProps_FWD_DEFINED__
    _transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
    _resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality

    // 3. Specify input / output format
    // _mediaType holds the fields common to both sides; it is copied into
    // the input and output types, then the output's rate fields are patched.
    MFCreateMediaType( &_mediaType );
    _mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
    _mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
    _mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
    _mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
    _mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
    _mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );

    MFCreateMediaType( &_inputMediaType );
    _mediaType->CopyAllItems( _inputMediaType );

    _transform->SetInputType( 0, _inputMediaType, 0 );

    MFCreateMediaType( &_outputMediaType );
    _mediaType->CopyAllItems( _outputMediaType );

    // only the sample-rate dependent fields differ on the output side
    _outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
    _outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );

    _transform->SetOutputType( 0, _outputMediaType, 0 );

    // 4. Send stream start messages to Resampler
    _transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0 );
    // 8. Send stream stop messages to Resampler
    // (destructor body: tears down the MFT in reverse order of setup)
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0 );
    _transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, 0 );

    // release every COM interface acquired in the constructor;
    // SAFE_RELEASE is a no-op on NULL pointers
    SAFE_RELEASE( _transformUnk );
    SAFE_RELEASE( _transform );
    SAFE_RELEASE( _mediaType );
    SAFE_RELEASE( _inputMediaType );
    SAFE_RELEASE( _outputMediaType );

#ifdef __IWMResamplerProps_FWD_DEFINED__
    SAFE_RELEASE( _resamplerProps );
  // Resample inSampleCount frames from inBuffer into outBuffer.
  // outSampleCount receives the number of frames actually produced (the MFT
  // may emit slightly more or fewer than the ratio predicts per call).
  // NOTE(review): outBuffer must be large enough for the converted data —
  // sizing is the caller's responsibility; nothing here bounds the final
  // memcpy beyond rBytes reported by the MFT.
  void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
    unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
    if ( _sampleRatio == 1 )
      // no sample rate conversion required
      memcpy( outBuffer, inBuffer, inputBufferSize );
      outSampleCount = inSampleCount;

    // worst-case output size: scaled input plus one extra frame of slack
    unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );

    IMFMediaBuffer* rInBuffer;
    IMFSample* rInSample;
    BYTE* rInByteBuffer = NULL;

    // 5. Create Sample object from input data
    MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );

    rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
    memcpy( rInByteBuffer, inBuffer, inputBufferSize );
    rInBuffer->Unlock();
    rInByteBuffer = NULL;

    rInBuffer->SetCurrentLength( inputBufferSize );

    MFCreateSample( &rInSample );
    rInSample->AddBuffer( rInBuffer );

    // 6. Pass input data to Resampler
    _transform->ProcessInput( 0, rInSample, 0 );

    // the sample holds its own reference to the media buffer
    SAFE_RELEASE( rInBuffer );
    SAFE_RELEASE( rInSample );

    // 7. Perform sample rate conversion
    IMFMediaBuffer* rOutBuffer = NULL;
    BYTE* rOutByteBuffer = NULL;

    MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;

    DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput

    // 7.1 Create Sample object for output data
    memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
    MFCreateSample( &( rOutDataBuffer.pSample ) );
    MFCreateMemoryBuffer( rBytes, &rOutBuffer );
    rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
    rOutDataBuffer.dwStreamID = 0;
    rOutDataBuffer.dwStatus = 0;
    rOutDataBuffer.pEvents = NULL;

    // 7.2 Get output data from Resampler
    // MF_E_TRANSFORM_NEED_MORE_INPUT means nothing is available yet;
    // release the output objects and report zero frames via early exit
    if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
      SAFE_RELEASE( rOutBuffer );
      SAFE_RELEASE( rOutDataBuffer.pSample );

    // 7.3 Write output data to outBuffer
    // re-acquire the (possibly multi-buffer) sample as one contiguous buffer
    SAFE_RELEASE( rOutBuffer );
    rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
    rOutBuffer->GetCurrentLength( &rBytes );

    rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
    memcpy( outBuffer, rOutByteBuffer, rBytes );
    rOutBuffer->Unlock();
    rOutByteBuffer = NULL;

    // convert bytes produced back to a frame count
    outSampleCount = rBytes / _bytesPerSample / _channelCount;
    SAFE_RELEASE( rOutBuffer );
    SAFE_RELEASE( rOutDataBuffer.pSample );
  unsigned int _bytesPerSample;        // bytes per single-channel sample
  unsigned int _channelCount;          // interleaved channels per frame

  // COM interfaces owned by this object; released in the destructor
  IUnknown* _transformUnk;
  IMFTransform* _transform;            // the resampler MFT itself
  IMFMediaType* _mediaType;            // shared template for in/out types
  IMFMediaType* _inputMediaType;
  IMFMediaType* _outputMediaType;

#ifdef __IWMResamplerProps_FWD_DEFINED__
  IWMResamplerProps* _resamplerProps;  // quality knob (half filter length)
4162 //-----------------------------------------------------------------------------
// A structure to hold various information related to the WASAPI implementation.
// Stored in stream_.apiHandle; all members default to NULL and are released
// or closed in RtApiWasapi::closeStream().
  IAudioClient* captureAudioClient;      // input-side (or loopback) client
  IAudioClient* renderAudioClient;       // output-side client
  IAudioCaptureClient* captureClient;    // packet interface for capture
  IAudioRenderClient* renderClient;      // packet interface for render
  HANDLE captureEvent;                   // event-driven buffer notification

    : captureAudioClient( NULL ),
      renderAudioClient( NULL ),
      captureClient( NULL ),
      renderClient( NULL ),
      captureEvent( NULL ),
      renderEvent( NULL ) {}
4183 //=============================================================================
// Constructor: initialize COM for this thread and create the MMDevice
// enumerator used by every device query.  A NULL deviceEnumerator_ is
// tolerated (old Windows); getDeviceCount() checks for it before use.
RtApiWasapi::RtApiWasapi()
  : coInitialized_( false ), deviceEnumerator_( NULL )
  // WASAPI can run either apartment or multi-threaded
  HRESULT hr = CoInitialize( NULL );
  if ( !FAILED( hr ) )
    coInitialized_ = true;  // remember so the destructor balances with CoUninitialize

  // Instantiate device enumerator
  hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
                         CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
                         ( void** ) &deviceEnumerator_ );

  // If this runs on an old Windows, it will fail. Ignore and proceed.
  deviceEnumerator_ = NULL;
4203 //-----------------------------------------------------------------------------
// Destructor: close any open stream, release the enumerator, and undo the
// constructor's CoInitialize() (only if it actually succeeded there).
RtApiWasapi::~RtApiWasapi()
  if ( stream_.state != STREAM_CLOSED )

  SAFE_RELEASE( deviceEnumerator_ );

  // If this object previously called CoInitialize()
  if ( coInitialized_ )
4217 //=============================================================================
// Return the total number of active WASAPI devices (capture + render).
// On any COM failure, errorText_ is set and error() is invoked; device
// index space is render devices first, then capture (see getDeviceInfo).
unsigned int RtApiWasapi::getDeviceCount( void )
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;

  // enumerator creation can fail on old Windows; treat as zero devices
  if ( !deviceEnumerator_ )

  // Count capture devices
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";

  // release all references
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );

  // empty errorText_ means every COM call above succeeded
  if ( errorText_.empty() )
    return captureDeviceCount + renderDeviceCount;

  error( RtAudioError::DRIVER_ERROR );
4269 //-----------------------------------------------------------------------------
// Probe one device and fill an RtAudio::DeviceInfo for it.
// Index convention: indices [0, renderDeviceCount) are render devices,
// [renderDeviceCount, renderDeviceCount+captureDeviceCount) are capture.
// All COM failures funnel to shared cleanup that releases every reference.
// NOTE(review): unlike getDeviceCount(), deviceEnumerator_ is dereferenced
// here without a NULL check — confirm this path is unreachable when
// construction failed on old Windows.
RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;
  std::string defaultDeviceName;
  bool isCaptureDevice = false;

  PROPVARIANT deviceNameProp;
  PROPVARIANT defaultDeviceNameProp;

  // COM references; every exit path must release these
  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;
  IMMDevice* devicePtr = NULL;
  IMMDevice* defaultDevicePtr = NULL;
  IAudioClient* audioClient = NULL;
  IPropertyStore* devicePropStore = NULL;
  IPropertyStore* defaultDevicePropStore = NULL;

  WAVEFORMATEX* deviceFormat = NULL;
  WAVEFORMATEX* closestMatchFormat = NULL;

  // pessimistic default; flipped to true only if the full probe succeeds
  info.probed = false;

  // Count capture devices
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";

  // validate device index
  if ( device >= captureDeviceCount + renderDeviceCount ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
    errorType = RtAudioError::INVALID_USE;

  // determine whether index falls within capture or render devices
  if ( device >= renderDeviceCount ) {
    // capture devices follow the render devices in the index space
    hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
    isCaptureDevice = true;

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
    isCaptureDevice = false;

  // get default device name
  if ( isCaptureDevice ) {
    hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";

    hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";

  hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";

  PropVariantInit( &defaultDeviceNameProp );

  hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";

  defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);

  hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";

  PropVariantInit( &deviceNameProp );

  hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";

  info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);

  // default flags are decided by comparing friendly names, not device IDs
  if ( isCaptureDevice ) {
    info.isDefaultInput = info.name == defaultDeviceName;
    info.isDefaultOutput = false;

    info.isDefaultInput = false;
    info.isDefaultOutput = info.name == defaultDeviceName;

  hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";

  hr = audioClient->GetMixFormat( &deviceFormat );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";

  // WASAPI devices are one-directional; duplex is synthesized by RtAudio
  if ( isCaptureDevice ) {
    info.inputChannels = deviceFormat->nChannels;
    info.outputChannels = 0;
    info.duplexChannels = 0;

    info.inputChannels = 0;
    info.outputChannels = deviceFormat->nChannels;
    info.duplexChannels = 0;

  info.sampleRates.clear();

  // allow support for all sample rates as we have a built-in sample rate converter
  for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
    info.sampleRates.push_back( SAMPLE_RATES[i] );
  info.preferredSampleRate = deviceFormat->nSamplesPerSec;

  // derive the native format from the mix format's tag / sub-format
  info.nativeFormats = 0;

  if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
       ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
         ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
    if ( deviceFormat->wBitsPerSample == 32 ) {
      info.nativeFormats |= RTAUDIO_FLOAT32;
    else if ( deviceFormat->wBitsPerSample == 64 ) {
      info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
            ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
              ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
    if ( deviceFormat->wBitsPerSample == 8 ) {
      info.nativeFormats |= RTAUDIO_SINT8;
    else if ( deviceFormat->wBitsPerSample == 16 ) {
      info.nativeFormats |= RTAUDIO_SINT16;
    else if ( deviceFormat->wBitsPerSample == 24 ) {
      info.nativeFormats |= RTAUDIO_SINT24;
    else if ( deviceFormat->wBitsPerSample == 32 ) {
      info.nativeFormats |= RTAUDIO_SINT32;

  // release all references
  PropVariantClear( &deviceNameProp );
  PropVariantClear( &defaultDeviceNameProp );

  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );
  SAFE_RELEASE( devicePtr );
  SAFE_RELEASE( defaultDevicePtr );
  SAFE_RELEASE( audioClient );
  SAFE_RELEASE( devicePropStore );
  SAFE_RELEASE( defaultDevicePropStore );

  // mix formats are allocated by COM and must be freed with CoTaskMemFree
  CoTaskMemFree( deviceFormat );
  CoTaskMemFree( closestMatchFormat );

  if ( !errorText_.empty() )
4498 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4500 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4501 if ( getDeviceInfo( i ).isDefaultOutput ) {
4509 //-----------------------------------------------------------------------------
4511 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4513 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4514 if ( getDeviceInfo( i ).isDefaultInput ) {
4522 //-----------------------------------------------------------------------------
// Stop (if needed) and tear down the open stream: release all WASAPI COM
// interfaces, close the notification events, free the WasapiHandle and the
// user/device buffers, and mark the stream CLOSED.
void RtApiWasapi::closeStream( void )
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
    error( RtAudioError::WARNING );

  // stop the callback thread before releasing anything it might be using
  if ( stream_.state != STREAM_STOPPED )

  // clean up stream memory
  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )

  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )

  // events are Win32 handles, not COM objects: CloseHandle, not Release
  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
    CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );

  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
    CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );

  delete ( WasapiHandle* ) stream_.apiHandle;
  stream_.apiHandle = NULL;

  // free the per-direction user buffers (index 0 = output, 1 = input)
  for ( int i = 0; i < 2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  // update stream state
  stream_.state = STREAM_CLOSED;
4567 //-----------------------------------------------------------------------------
// Start the stream: flip the state to RUNNING, then spawn the WASAPI
// processing thread.  The thread is created suspended so its priority can
// be set before it runs a single instruction.
void RtApiWasapi::startStream( void )
  RtApi::startStream();

  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiWasapi::startStream: The stream is already running.";
    error( RtAudioError::WARNING );

  #if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );

  // update stream state
  // (set before thread creation so wasapiThread sees RUNNING immediately)
  stream_.state = STREAM_RUNNING;

  // create WASAPI stream thread
  stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );

  if ( !stream_.callbackInfo.thread ) {
    errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
    error( RtAudioError::THREAD_ERROR );

    // priority was chosen in probeDeviceOpen (15 if RTAUDIO_SCHEDULE_REALTIME)
    SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
    ResumeThread( ( void* ) stream_.callbackInfo.thread );
4600 //-----------------------------------------------------------------------------
// Stop the stream gracefully: signal the callback thread via the state
// flag, wait for it to exit, let the final buffer drain, then close the
// thread handle.
void RtApiWasapi::stopStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
    error( RtAudioError::WARNING );

  // inform stream thread by setting stream state to STREAM_STOPPING
  // (wasapiThread polls this flag and sets STREAM_STOPPED when it exits)
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  while( stream_.state != STREAM_STOPPED ) {

  // Wait for the last buffer to play before stopping.
  // bufferSize frames at sampleRate frames/sec -> milliseconds
  Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
4633 //-----------------------------------------------------------------------------
// Abort the stream: identical to stopStream() except no drain delay —
// the last buffer is discarded rather than played out.
void RtApiWasapi::abortStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
    error( RtAudioError::WARNING );

  // inform stream thread by setting stream state to STREAM_STOPPING
  stream_.state = STREAM_STOPPING;

  // wait until stream thread is stopped
  while ( stream_.state != STREAM_STOPPED ) {

  // close thread handle
  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
    error( RtAudioError::THREAD_ERROR );

  stream_.callbackInfo.thread = (ThreadHandle) NULL;
4663 //-----------------------------------------------------------------------------
// Open one direction (INPUT or OUTPUT) of a stream on the given device.
// Creates the WasapiHandle, activates the appropriate IAudioClient, records
// the device mix format / latency, and fills in the stream_ bookkeeping
// (channels, formats, conversion flags, user buffer).  Returns SUCCESS or
// FAILURE; all COM references are released on every exit path.
// Index convention matches getDeviceInfo: render devices first, then capture.
// NOTE(review): deviceEnumerator_ is dereferenced without a NULL check here —
// confirm open is never attempted when construction failed.
bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int* bufferSize,
                                   RtAudio::StreamOptions* options )
  bool methodResult = FAILURE;
  unsigned int captureDeviceCount = 0;
  unsigned int renderDeviceCount = 0;

  IMMDeviceCollection* captureDevices = NULL;
  IMMDeviceCollection* renderDevices = NULL;
  IMMDevice* devicePtr = NULL;
  WAVEFORMATEX* deviceFormat = NULL;
  unsigned int bufferBytes;
  stream_.state = STREAM_STOPPED;

  // create API Handle if not already created
  // (a DUPLEX open calls this twice; the handle is shared between modes)
  if ( !stream_.apiHandle )
    stream_.apiHandle = ( void* ) new WasapiHandle();

  // Count capture devices
  RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
  HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";

  hr = captureDevices->GetCount( &captureDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";

  // Count render devices
  hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";

  hr = renderDevices->GetCount( &renderDeviceCount );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";

  // validate device index
  if ( device >= captureDeviceCount + renderDeviceCount ) {
    errorType = RtAudioError::INVALID_USE;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";

  // if device index falls within capture devices
  if ( device >= renderDeviceCount ) {
    // capture endpoints can only be opened for input
    if ( mode != INPUT ) {
      errorType = RtAudioError::INVALID_USE;
      errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";

    // retrieve captureAudioClient from devicePtr
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device audio client.";

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device mix format.";

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );

  // if device index falls within render devices and is configured for loopback
  // (a render endpoint opened as INPUT captures what the device plays)
  if ( device < renderDeviceCount && mode == INPUT )
    // if renderAudioClient is not initialised, initialise it now
    IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
    if ( !renderAudioClient )
      // recursive open of the same device as OUTPUT to drive the loopback
      probeDeviceOpen( device, OUTPUT, channels, firstChannel, sampleRate, format, bufferSize, options );

    // retrieve captureAudioClient from devicePtr
    // (the render device's client is stored on the capture side for loopback)
    IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &captureAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";

    hr = captureAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );

  // if device index falls within render devices and is configured for output
  if ( device < renderDeviceCount && mode == OUTPUT )
    // if renderAudioClient is already initialised, don't initialise it again
    IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
    if ( renderAudioClient )
      methodResult = SUCCESS;

    hr = renderDevices->Item( device, &devicePtr );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";

    hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                              NULL, ( void** ) &renderAudioClient );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device audio client.";

    hr = renderAudioClient->GetMixFormat( &deviceFormat );
    if ( FAILED( hr ) ) {
      errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device mix format.";

    stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
    renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );

  // second call for the opposite direction upgrades the stream to DUPLEX
  if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
       ( stream_.mode == INPUT && mode == OUTPUT ) ) {
    stream_.mode = DUPLEX;

    stream_.mode = mode;

  // fill in the RtApi stream bookkeeping for this direction
  stream_.device[mode] = device;
  stream_.doByteSwap[mode] = false;
  stream_.sampleRate = sampleRate;
  stream_.bufferSize = *bufferSize;
  stream_.nBuffers = 1;
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;
  stream_.userFormat = format;
  stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;
    stream_.userInterleaved = true;
  stream_.deviceInterleaved[mode] = true;

  // Set flags for buffer conversion.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] ||
       stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
       stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
    stream_.doConvertBuffer[mode] = true;
  else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
            stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  if ( stream_.doConvertBuffer[mode] )
    setConvertInfo( mode, 0 );

  // Allocate necessary internal buffers
  bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );

  stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
  if ( !stream_.userBuffer[mode] ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";

  // priority 15 maps to Win32 THREAD_PRIORITY_TIME_CRITICAL in startStream
  if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
    stream_.callbackInfo.priority = 15;
    stream_.callbackInfo.priority = 0;

  ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
  ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode

  methodResult = SUCCESS;

  // shared cleanup: release COM references regardless of success/failure
  SAFE_RELEASE( captureDevices );
  SAFE_RELEASE( renderDevices );
  SAFE_RELEASE( devicePtr );
  CoTaskMemFree( deviceFormat );

  // if method failed, close the stream
  if ( methodResult == FAILURE )

  if ( !errorText_.empty() )
  return methodResult;
4898 //=============================================================================
// Win32 thread entry trampoline: casts the opaque thread parameter back to
// the RtApiWasapi instance and runs its blocking stream-processing loop
// (wasapiThread()) on this new thread.
4900 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4903   ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
// Win32 thread entry trampoline: invokes stopStream() on the RtApiWasapi
// instance passed as the thread parameter. Spawned from inside wasapiThread()
// (callback return value 1) so the stream can stop itself without deadlocking
// on its own processing thread.
4908 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4911   ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
// Win32 thread entry trampoline: invokes abortStream() on the RtApiWasapi
// instance passed as the thread parameter. Spawned from inside wasapiThread()
// (callback return value 2) so the stream can abort itself without deadlocking
// on its own processing thread.
4916 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4919   ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4924 //-----------------------------------------------------------------------------
// Main WASAPI stream-processing loop, run on its own thread (see
// runWasapiThread above). Responsibilities visible in this function:
//  - lazily initialize the capture/render IAudioClient services, events and
//    sample-rate resamplers on first entry;
//  - loop until stream_.state == STREAM_STOPPING, each iteration pulling
//    captured audio, invoking the user callback, and pushing rendered audio;
//  - on exit, free COM/format/buffer resources and mark STREAM_STOPPED.
// Errors are accumulated in the local errorText/errorType and published to
// errorText_ at the end rather than thrown mid-loop.
4926 void RtApiWasapi::wasapiThread()
4928   // as this is a new thread, we must CoInitialize it
4929   CoInitialize( NULL );
  // Unpack the per-stream WASAPI state stashed in stream_.apiHandle.
4933   IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4934   IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4935   IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4936   IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4937   HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4938   HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4940   WAVEFORMATEX* captureFormat = NULL;
4941   WAVEFORMATEX* renderFormat = NULL;
4942   float captureSrRatio = 0.0f;
4943   float renderSrRatio = 0.0f;
4944   WasapiBuffer captureBuffer;
4945   WasapiBuffer renderBuffer;
4946   WasapiResampler* captureResampler = NULL;
4947   WasapiResampler* renderResampler = NULL;
4949   // declare local stream variables
4950   RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4951   BYTE* streamBuffer = NULL;
4952   unsigned long captureFlags = 0;
4953   unsigned int bufferFrameCount = 0;
4954   unsigned int numFramesPadding = 0;
4955   unsigned int convBufferSize = 0;
  // Loopback capture is inferred when input and output use the same device id.
4956   bool loopbackEnabled = stream_.device[INPUT] == stream_.device[OUTPUT];
4957   bool callbackPushed = true;
4958   bool callbackPulled = false;
4959   bool callbackStopped = false;
4960   int callbackResult = 0;
4962   // convBuffer is used to store converted buffers between WASAPI and the user
4963   char* convBuffer = NULL;
4964   unsigned int convBuffSize = 0;
4965   unsigned int deviceBuffSize = 0;
4967   std::string errorText;
4968   RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4970   // Attempt to assign "Pro Audio" characteristic to thread
4971   HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4973     DWORD taskIndex = 0;
4974     TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr =
4975       ( TAvSetMmThreadCharacteristicsPtr ) (void(*)()) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4976     AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4977     FreeLibrary( AvrtDll );
4980   // start capture stream if applicable
4981   if ( captureAudioClient ) {
4982     hr = captureAudioClient->GetMixFormat( &captureFormat );
4983     if ( FAILED( hr ) ) {
4984       errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4988     // init captureResampler
4989     captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4990                                             formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4991                                             captureFormat->nSamplesPerSec, stream_.sampleRate );
  // Ratio of device mix rate to the user-requested stream rate; used to
  // size buffers and to decide whether resampling is needed below.
4993     captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4995     if ( !captureClient ) {
4996       hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4997                                            loopbackEnabled ? AUDCLNT_STREAMFLAGS_LOOPBACK : AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5002       if ( FAILED( hr ) ) {
5003         errorText = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
5007       hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
5008                                            ( void** ) &captureClient );
5009       if ( FAILED( hr ) ) {
5010         errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
5014       // don't configure captureEvent if in loopback mode
5015       if ( !loopbackEnabled )
5017         // configure captureEvent to trigger on every available capture buffer
5018         captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5019         if ( !captureEvent ) {
5020           errorType = RtAudioError::SYSTEM_ERROR;
5021           errorText = "RtApiWasapi::wasapiThread: Unable to create capture event.";
5025         hr = captureAudioClient->SetEventHandle( captureEvent );
5026         if ( FAILED( hr ) ) {
5027           errorText = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
  // Publish the lazily-created handles back into the shared WasapiHandle so
  // other methods (e.g. close/stop) can release them.
5031         ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
5034       ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
5036     // reset the capture stream
5037     hr = captureAudioClient->Reset();
5038     if ( FAILED( hr ) ) {
5039       errorText = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
5043     // start the capture stream
5044     hr = captureAudioClient->Start();
5045     if ( FAILED( hr ) ) {
5046       errorText = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
5051     unsigned int inBufferSize = 0;
5052     hr = captureAudioClient->GetBufferSize( &inBufferSize );
5053     if ( FAILED( hr ) ) {
5054       errorText = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
5058     // scale outBufferSize according to stream->user sample rate ratio
5059     unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
5060     inBufferSize *= stream_.nDeviceChannels[INPUT];
5062     // set captureBuffer size
5063     captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
5066   // start render stream if applicable
5067   if ( renderAudioClient ) {
5068     hr = renderAudioClient->GetMixFormat( &renderFormat );
5069     if ( FAILED( hr ) ) {
5070       errorText = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
5074     // init renderResampler
5075     renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
5076                                            formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
5077                                            stream_.sampleRate, renderFormat->nSamplesPerSec );
5079     renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
5081     if ( !renderClient ) {
5082       hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
5083                                           AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
5088       if ( FAILED( hr ) ) {
5089         errorText = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
5093       hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
5094                                           ( void** ) &renderClient );
5095       if ( FAILED( hr ) ) {
5096         errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
5100       // configure renderEvent to trigger on every available render buffer
5101       renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
5102       if ( !renderEvent ) {
5103         errorType = RtAudioError::SYSTEM_ERROR;
5104         errorText = "RtApiWasapi::wasapiThread: Unable to create render event.";
5108       hr = renderAudioClient->SetEventHandle( renderEvent );
5109       if ( FAILED( hr ) ) {
5110         errorText = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5114       ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5115       ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5117     // reset the render stream
5118     hr = renderAudioClient->Reset();
5119     if ( FAILED( hr ) ) {
5120       errorText = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5124     // start the render stream
5125     hr = renderAudioClient->Start();
5126     if ( FAILED( hr ) ) {
5127       errorText = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5132     unsigned int outBufferSize = 0;
5133     hr = renderAudioClient->GetBufferSize( &outBufferSize );
5134     if ( FAILED( hr ) ) {
5135       errorText = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5139     // scale inBufferSize according to user->stream sample rate ratio
5140     unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5141     outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5143     // set renderBuffer size
5144     renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5147   // malloc buffer memory
  // Sizes below are the worst case over the directions in use; DUPLEX takes
  // the max of the INPUT and OUTPUT requirements.
5148   if ( stream_.mode == INPUT )
5150     using namespace std; // for ceilf
5151     convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5152     deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5154   else if ( stream_.mode == OUTPUT )
5156     convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5157     deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5159   else if ( stream_.mode == DUPLEX )
5161     convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5162                              ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5163     deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5164                                stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5167   convBuffSize *= 2; // allow overflow for *SrRatio remainders
5168   convBuffer = ( char* ) calloc( convBuffSize, 1 );
5169   stream_.deviceBuffer = ( char* ) calloc( deviceBuffSize, 1 );
5170   if ( !convBuffer || !stream_.deviceBuffer ) {
5171     errorType = RtAudioError::MEMORY_ERROR;
5172     errorText = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5176   // stream process loop
5177   while ( stream_.state != STREAM_STOPPING ) {
5178     if ( !callbackPulled ) {
5181       // 1. Pull callback buffer from inputBuffer
5182       // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5183       //                          Convert callback buffer to user format
5185       if ( captureAudioClient )
5187         int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5188         if ( captureSrRatio != 1 )
5190           // account for remainders
5195         while ( convBufferSize < stream_.bufferSize )
5197           // Pull callback buffer from inputBuffer
5198           callbackPulled = captureBuffer.pullBuffer( convBuffer,
5199                                                      samplesToPull * stream_.nDeviceChannels[INPUT],
5200                                                      stream_.deviceFormat[INPUT] );
5202           if ( !callbackPulled )
5207           // Convert callback buffer to user sample rate
5208           unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5209           unsigned int convSamples = 0;
5211           captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5216           convBufferSize += convSamples;
5217           samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5220         if ( callbackPulled )
5222           if ( stream_.doConvertBuffer[INPUT] ) {
5223             // Convert callback buffer to user format
5224             convertBuffer( stream_.userBuffer[INPUT],
5225                            stream_.deviceBuffer,
5226                            stream_.convertInfo[INPUT] );
5229             // no further conversion, simple copy deviceBuffer to userBuffer
5230             memcpy( stream_.userBuffer[INPUT],
5231                     stream_.deviceBuffer,
5232                     stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5237         // if there is no capture stream, set callbackPulled flag
5238         callbackPulled = true;
5243     // 1. Execute user callback method
5244     // 2. Handle return value from callback
5246     // if callback has not requested the stream to stop
5247     if ( callbackPulled && !callbackStopped ) {
5248       // Execute user callback method
5249       callbackResult = callback( stream_.userBuffer[OUTPUT],
5250                                  stream_.userBuffer[INPUT],
5253                                  captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5254                                  stream_.callbackInfo.userData );
  // Advance the stream time by one buffer after each callback invocation.
5257       RtApi::tickStreamTime();
5259       // Handle return value from callback
5260       if ( callbackResult == 1 ) {
5261         // instantiate a thread to stop this thread
5262         HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5263         if ( !threadHandle ) {
5264           errorType = RtAudioError::THREAD_ERROR;
5265           errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5268         else if ( !CloseHandle( threadHandle ) ) {
5269           errorType = RtAudioError::THREAD_ERROR;
5270           errorText = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5274         callbackStopped = true;
5276       else if ( callbackResult == 2 ) {
5277         // instantiate a thread to stop this thread
5278         HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5279         if ( !threadHandle ) {
5280           errorType = RtAudioError::THREAD_ERROR;
5281           errorText = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5284         else if ( !CloseHandle( threadHandle ) ) {
5285           errorType = RtAudioError::THREAD_ERROR;
5286           errorText = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5290         callbackStopped = true;
5297     // 1. Convert callback buffer to stream format
5298     // 2. Convert callback buffer to stream sample rate and channel count
5299     // 3. Push callback buffer into outputBuffer
5301     if ( renderAudioClient && callbackPulled )
5303       // if the last call to renderBuffer.PushBuffer() was successful
5304       if ( callbackPushed || convBufferSize == 0 )
5306         if ( stream_.doConvertBuffer[OUTPUT] )
5308           // Convert callback buffer to stream format
5309           convertBuffer( stream_.deviceBuffer,
5310                          stream_.userBuffer[OUTPUT],
5311                          stream_.convertInfo[OUTPUT] );
5315           // no further conversion, simple copy userBuffer to deviceBuffer
5316           memcpy( stream_.deviceBuffer,
5317                   stream_.userBuffer[OUTPUT],
5318                   stream_.bufferSize * stream_.nUserChannels[OUTPUT] * formatBytes( stream_.userFormat ) );
5321         // Convert callback buffer to stream sample rate
5322         renderResampler->Convert( convBuffer,
5323                                   stream_.deviceBuffer,
5328       // Push callback buffer into outputBuffer
5329       callbackPushed = renderBuffer.pushBuffer( convBuffer,
5330                                                 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5331                                                 stream_.deviceFormat[OUTPUT] );
5334       // if there is no render stream, set callbackPushed flag
5335       callbackPushed = true;
5340     // 1. Get capture buffer from stream
5341     // 2. Push capture buffer into inputBuffer
5342     // 3. If 2. was successful: Release capture buffer
5344     if ( captureAudioClient ) {
5345       // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5346       if ( !callbackPulled ) {
  // In loopback mode there is no capture event; the render event paces us.
5347         WaitForSingleObject( loopbackEnabled ? renderEvent : captureEvent, INFINITE );
5350       // Get capture buffer from stream
5351       hr = captureClient->GetBuffer( &streamBuffer,
5353                                      &captureFlags, NULL, NULL );
5354       if ( FAILED( hr ) ) {
5355         errorText = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5359       if ( bufferFrameCount != 0 ) {
5360         // Push capture buffer into inputBuffer
5361         if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5362                                        bufferFrameCount * stream_.nDeviceChannels[INPUT],
5363                                        stream_.deviceFormat[INPUT] ) )
5365           // Release capture buffer
5366           hr = captureClient->ReleaseBuffer( bufferFrameCount );
5367           if ( FAILED( hr ) ) {
5368             errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5374           // Inform WASAPI that capture was unsuccessful
5375           hr = captureClient->ReleaseBuffer( 0 );
5376           if ( FAILED( hr ) ) {
5377             errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5384         // Inform WASAPI that capture was unsuccessful
5385         hr = captureClient->ReleaseBuffer( 0 );
5386         if ( FAILED( hr ) ) {
5387           errorText = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5395     // 1. Get render buffer from stream
5396     // 2. Pull next buffer from outputBuffer
5397     // 3. If 2. was successful: Fill render buffer with next buffer
5398     //                          Release render buffer
5400     if ( renderAudioClient ) {
5401       // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5402       if ( callbackPulled && !callbackPushed ) {
5403         WaitForSingleObject( renderEvent, INFINITE );
5406       // Get render buffer from stream
5407       hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5408       if ( FAILED( hr ) ) {
5409         errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5413       hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5414       if ( FAILED( hr ) ) {
5415         errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
  // Writable space this period = total buffer size minus frames still queued.
5419       bufferFrameCount -= numFramesPadding;
5421       if ( bufferFrameCount != 0 ) {
5422         hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5423         if ( FAILED( hr ) ) {
5424           errorText = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5428         // Pull next buffer from outputBuffer
5429         // Fill render buffer with next buffer
5430         if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5431                                       bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5432                                       stream_.deviceFormat[OUTPUT] ) )
5434           // Release render buffer
5435           hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5436           if ( FAILED( hr ) ) {
5437             errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5443           // Inform WASAPI that render was unsuccessful
5444           hr = renderClient->ReleaseBuffer( 0, 0 );
5445           if ( FAILED( hr ) ) {
5446             errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5453         // Inform WASAPI that render was unsuccessful
5454         hr = renderClient->ReleaseBuffer( 0, 0 );
5455         if ( FAILED( hr ) ) {
5456           errorText = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5462     // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5463     if ( callbackPushed ) {
5464       // unsetting the callbackPulled flag lets the stream know that
5465       // the audio device is ready for another callback output buffer.
5466       callbackPulled = false;
  // Cleanup: free mix formats, conversion buffer and resamplers allocated above.
5473   CoTaskMemFree( captureFormat );
5474   CoTaskMemFree( renderFormat );
5476   free ( convBuffer );
5477   delete renderResampler;
5478   delete captureResampler;
5482   // update stream state
5483   stream_.state = STREAM_STOPPED;
  // Publish any accumulated error to the member error string for reporting.
5485   if ( !errorText.empty() )
5487     errorText_ = errorText;
5492 //******************** End of __WINDOWS_WASAPI__ *********************//
5496 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5498 // Modified by Robin Davies, October 2005
5499 // - Improvements to DirectX pointer chasing.
5500 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5501 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5502 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5503 // Changed device query structure for RtAudio 4.0.7, January 2010
5505 #include <windows.h>
5506 #include <process.h>
5507 #include <mmsystem.h>
5511 #include <algorithm>
5513 #if defined(__MINGW32__)
5514 // missing from latest mingw winapi
5515 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5516 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5517 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5518 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5521 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5523 #ifdef _MSC_VER // if Microsoft Visual C++
5524 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
// Returns nonzero when 'pointer' lies in the half-open interval
// [earlierPointer, laterPointer) of a circular DirectSound buffer of
// 'bufferSize' bytes. The first three lines unwrap the circular positions
// onto a linear range so an ordinary comparison works across the wrap point.
5527 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5529   if ( pointer > bufferSize ) pointer -= bufferSize;
5530   if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5531   if ( pointer < earlierPointer ) pointer += bufferSize;
5532   return pointer >= earlierPointer && pointer < laterPointer;
5535 // A structure to hold various information related to the DirectSound
5536 // API implementation.
5538 unsigned int drainCounter; // Tracks callback counts when draining
5539 bool internalDrain; // Indicates if stop is initiated from callback or not.
5543 UINT bufferPointer[2];
5544 DWORD dsBufferSize[2];
5545 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5549 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5552 // Declarations for utility functions, callbacks, and structures
5553 // specific to the DirectSound implementation.
5554 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5555 LPCTSTR description,
5559 static const char* getErrorString( int code );
5561 static unsigned __stdcall callbackHandler( void *ptr );
5570 : found(false) { validId[0] = false; validId[1] = false; }
5573 struct DsProbeData {
5575 std::vector<struct DsDevice>* dsDevices;
// Constructor: initializes COM for this thread. DirectSound requires COM;
// if CoInitialize fails (e.g. the caller already chose an incompatible
// threading model), we accept the caller's model and simply remember not to
// call CoUninitialize later.
5578 RtApiDs :: RtApiDs()
5580   // Dsound will run both-threaded. If CoInitialize fails, then just
5581   // accept whatever the mainline chose for a threading model.
5582   coInitialized_ = false;
5583   HRESULT hr = CoInitialize( NULL );
5584   if ( !FAILED( hr ) ) coInitialized_ = true;
// Destructor: closes any open stream, then balances the constructor's
// CoInitialize with CoUninitialize (only if the constructor's call succeeded).
5587 RtApiDs :: ~RtApiDs()
5589   if ( stream_.state != STREAM_CLOSED ) closeStream();
5590   if ( coInitialized_ ) CoUninitialize(); // balanced call.
5593 // The DirectSound default output is always the first device.
// Returns the index of the default output device (device 0 by convention
// for DirectSound enumeration).
5594 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5599 // The DirectSound default input is always the first input device,
5600 // which is the first capture device enumerated.
5601 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Enumerates DirectSound output and capture devices into the dsDevices
// vector (via deviceQueryCallback), prunes devices that have disappeared
// since the last query, and returns the resulting device count.
// Enumeration failures are reported as warnings, not fatal errors.
5606 unsigned int RtApiDs :: getDeviceCount( void )
5608   // Set query flag for previously found devices to false, so that we
5609   // can check for any devices that have disappeared.
5610   for ( unsigned int i=0; i<dsDevices.size(); i++ )
5611     dsDevices[i].found = false;
5613   // Query DirectSound devices.
5614   struct DsProbeData probeInfo;
5615   probeInfo.isInput = false;
5616   probeInfo.dsDevices = &dsDevices;
5617   HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5618   if ( FAILED( result ) ) {
5619     errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5620     errorText_ = errorStream_.str();
5621     error( RtAudioError::WARNING );
5624   // Query DirectSoundCapture devices.
5625   probeInfo.isInput = true;
5626   result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5627   if ( FAILED( result ) ) {
5628     errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5629     errorText_ = errorStream_.str();
5630     error( RtAudioError::WARNING );
5633   // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5634   for ( unsigned int i=0; i<dsDevices.size(); ) {
5635     if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5639   return static_cast<unsigned int>(dsDevices.size());
// Probes the capabilities of DirectSound device 'device' and fills in an
// RtAudio::DeviceInfo: output channels and supported rates via the
// IDirectSound caps, then input channels/rates/formats via the
// IDirectSoundCapture caps (decoded from the WAVE_FORMAT_* bitmask).
// Failures during probing are reported as warnings and return a partially
// filled info structure.
5642 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5644   RtAudio::DeviceInfo info;
5645   info.probed = false;
5647   if ( dsDevices.size() == 0 ) {
5648     // Force a query of all devices
5650     if ( dsDevices.size() == 0 ) {
5651       errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5652       error( RtAudioError::INVALID_USE );
5657   if ( device >= dsDevices.size() ) {
5658     errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5659     error( RtAudioError::INVALID_USE );
  // No valid output id for this device: skip straight to the capture probe.
5664   if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5666   LPDIRECTSOUND output;
5668   result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5669   if ( FAILED( result ) ) {
5670     errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5671     errorText_ = errorStream_.str();
5672     error( RtAudioError::WARNING );
5676   outCaps.dwSize = sizeof( outCaps );
5677   result = output->GetCaps( &outCaps );
5678   if ( FAILED( result ) ) {
5680     errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5681     errorText_ = errorStream_.str();
5682     error( RtAudioError::WARNING );
5686   // Get output channel information.
5687   info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5689   // Get sample rate information.
5690   info.sampleRates.clear();
5691   for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5692     if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5693          SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5694       info.sampleRates.push_back( SAMPLE_RATES[k] );
  // Prefer the highest supported rate at or below 48 kHz as the default.
5696       if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5697         info.preferredSampleRate = SAMPLE_RATES[k];
5701   // Get format information.
5702   if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5703   if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5707   if ( getDefaultOutputDevice() == device )
5708     info.isDefaultOutput = true;
  // If the device has no capture id, it is output-only: name it and return.
5710   if ( dsDevices[ device ].validId[1] == false ) {
5711     info.name = dsDevices[ device ].name;
5718   LPDIRECTSOUNDCAPTURE input;
5719   result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5720   if ( FAILED( result ) ) {
5721     errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5722     errorText_ = errorStream_.str();
5723     error( RtAudioError::WARNING );
5728   inCaps.dwSize = sizeof( inCaps );
5729   result = input->GetCaps( &inCaps );
5730   if ( FAILED( result ) ) {
5732     errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5733     errorText_ = errorStream_.str();
5734     error( RtAudioError::WARNING );
5738   // Get input channel information.
5739   info.inputChannels = inCaps.dwChannels;
5741   // Get sample rate and format information.
  // Decode the WAVE_FORMAT_* capability bitmask: the stereo (xSxx) bits when
  // the device reports >= 2 channels, the mono (xMxx) bits for 1 channel.
5742   std::vector<unsigned int> rates;
5743   if ( inCaps.dwChannels >= 2 ) {
5744     if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5745     if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5746     if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5747     if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5748     if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5749     if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5750     if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5751     if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5753     if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5754       if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5755       if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5756       if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5757       if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5759     else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5760       if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5761       if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5762       if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5763       if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5766   else if ( inCaps.dwChannels == 1 ) {
5767     if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5768     if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5769     if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5770     if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5771     if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5772     if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5773     if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5774     if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5776     if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5777       if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5778       if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5779       if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5780       if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5782     else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5783       if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5784       if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5785       if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5786       if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5789   else info.inputChannels = 0; // technically, this would be an error
5793   if ( info.inputChannels == 0 ) return info;
5795   // Copy the supported rates to the info structure but avoid duplication.
5797   for ( unsigned int i=0; i<rates.size(); i++ ) {
5799     for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5800       if ( rates[i] == info.sampleRates[j] ) {
5805     if ( found == false ) info.sampleRates.push_back( rates[i] );
5807   std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5809   // If device opens for both playback and capture, we determine the channels.
5810   if ( info.outputChannels > 0 && info.inputChannels > 0 )
5811     info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  // Device 0 is always the default input under DirectSound enumeration.
5813   if ( device == 0 ) info.isDefaultInput = true;
5815   // Copy name and return.
5816   info.name = dsDevices[ device ].name;
// Open one direction (playback or capture) of a DirectSound stream.
// Called once per direction; for a duplex stream it runs twice (OUTPUT
// first, then INPUT — see the stream_.mode == OUTPUT && mode == INPUT
// promotion to DUPLEX near the end).  On success it fills in stream_
// and the shared DsHandle, and starts the callback thread if needed.
// NOTE(review): this listing is elided — the original-file line numbers
// embedded below are non-contiguous, so failure returns / closing braces
// between numbered statements are not shown here.
5821 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5822 unsigned int firstChannel, unsigned int sampleRate,
5823 RtAudioFormat format, unsigned int *bufferSize,
5824 RtAudio::StreamOptions *options )
// DirectSound supports at most two channels per device in this backend.
5826 if ( channels + firstChannel > 2 ) {
5827 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
// Sanity checks against the enumerated device list (dsDevices).
5831 size_t nDevices = dsDevices.size();
5832 if ( nDevices == 0 ) {
5833 // This should not happen because a check is made before this function is called.
5834 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5838 if ( device >= nDevices ) {
5839 // This should not happen because a check is made before this function is called.
5840 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
// Per-direction validity: validId[0] = output GUID valid, validId[1] = input.
5844 if ( mode == OUTPUT ) {
5845 if ( dsDevices[ device ].validId[0] == false ) {
5846 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5847 errorText_ = errorStream_.str();
5851 else { // mode == INPUT
5852 if ( dsDevices[ device ].validId[1] == false ) {
5853 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5854 errorText_ = errorStream_.str();
5859 // According to a note in PortAudio, using GetDesktopWindow()
5860 // instead of GetForegroundWindow() is supposed to avoid problems
5861 // that occur when the application's window is not the foreground
5862 // window. Also, if the application window closes before the
5863 // DirectSound buffer, DirectSound can crash. In the past, I had
5864 // problems when using GetDesktopWindow() but it seems fine now
5865 // (January 2010). I'll leave it commented here.
5866 // HWND hWnd = GetForegroundWindow();
5867 HWND hWnd = GetDesktopWindow();
5869 // Check the numberOfBuffers parameter and limit the lowest value to
5870 // two. This is a judgement call and a value of two is probably too
5871 // low for capture, but it should work for playback.
5873 if ( options ) nBuffers = options->numberOfBuffers;
5874 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5875 if ( nBuffers < 2 ) nBuffers = 3;
5877 // Check the lower range of the user-specified buffer size and set
5878 // (arbitrarily) to a lower bound of 32.
5879 if ( *bufferSize < 32 ) *bufferSize = 32;
5881 // Create the wave format structure. The data format setting will
5882 // be determined later.
5883 WAVEFORMATEX waveFormat;
5884 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5885 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5886 waveFormat.nChannels = channels + firstChannel;
5887 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5889 // Determine the device buffer size. By default, we'll use the value
5890 // defined above (32K), but we will grow it to make allowances for
5891 // very large software buffer sizes.
5892 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5893 DWORD dsPointerLeadTime = 0;
// ohandle/bhandle carry the type-erased device object and buffer object
// into the shared DsHandle below (output or capture flavor per branch).
5895 void *ohandle = 0, *bhandle = 0;
// ---------------- Playback (OUTPUT) setup ----------------
5897 if ( mode == OUTPUT ) {
5899 LPDIRECTSOUND output;
5900 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5901 if ( FAILED( result ) ) {
5902 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5903 errorText_ = errorStream_.str();
5908 outCaps.dwSize = sizeof( outCaps );
5909 result = output->GetCaps( &outCaps );
5910 if ( FAILED( result ) ) {
5912 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5913 errorText_ = errorStream_.str();
5917 // Check channel information.
5918 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5919 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5920 errorText_ = errorStream_.str();
5924 // Check format information. Use 16-bit format unless not
5925 // supported or user requests 8-bit.
5926 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5927 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5928 waveFormat.wBitsPerSample = 16;
5929 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5932 waveFormat.wBitsPerSample = 8;
5933 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5935 stream_.userFormat = format;
5937 // Update wave format structure and buffer information.
5938 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5939 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
// Lead time = total bytes the writer stays ahead of the play cursor.
5940 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5942 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5943 while ( dsPointerLeadTime * 2U > dsBufferSize )
5946 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5947 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5948 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5949 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5950 if ( FAILED( result ) ) {
5952 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5953 errorText_ = errorStream_.str();
5957 // Even though we will write to the secondary buffer, we need to
5958 // access the primary buffer to set the correct output format
5959 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5960 // buffer description.
5961 DSBUFFERDESC bufferDescription;
5962 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5963 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5964 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5966 // Obtain the primary buffer
5967 LPDIRECTSOUNDBUFFER buffer;
5968 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5969 if ( FAILED( result ) ) {
5971 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5972 errorText_ = errorStream_.str();
5976 // Set the primary DS buffer sound format.
5977 result = buffer->SetFormat( &waveFormat );
5978 if ( FAILED( result ) ) {
5980 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5981 errorText_ = errorStream_.str();
5985 // Setup the secondary DS buffer description.
5986 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5987 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5988 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5989 DSBCAPS_GLOBALFOCUS |
5990 DSBCAPS_GETCURRENTPOSITION2 |
5991 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5992 bufferDescription.dwBufferBytes = dsBufferSize;
5993 bufferDescription.lpwfxFormat = &waveFormat;
5995 // Try to create the secondary DS buffer. If that doesn't work,
5996 // try to use software mixing. Otherwise, there's a problem.
5997 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5998 if ( FAILED( result ) ) {
5999 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
6000 DSBCAPS_GLOBALFOCUS |
6001 DSBCAPS_GETCURRENTPOSITION2 |
6002 DSBCAPS_LOCSOFTWARE ); // Force software mixing
6003 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
6004 if ( FAILED( result ) ) {
6006 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
6007 errorText_ = errorStream_.str();
6012 // Get the buffer size ... might be different from what we specified.
6014 dsbcaps.dwSize = sizeof( DSBCAPS );
6015 result = buffer->GetCaps( &dsbcaps );
6016 if ( FAILED( result ) ) {
6019 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6020 errorText_ = errorStream_.str();
6024 dsBufferSize = dsbcaps.dwBufferBytes;
6026 // Lock the DS buffer
6029 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6030 if ( FAILED( result ) ) {
6033 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
6034 errorText_ = errorStream_.str();
6038 // Zero the DS buffer
6039 ZeroMemory( audioPtr, dataLen );
6041 // Unlock the DS buffer
6042 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6043 if ( FAILED( result ) ) {
6046 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
6047 errorText_ = errorStream_.str();
// Hand the COM objects to the DsHandle via the type-erased locals.
6051 ohandle = (void *) output;
6052 bhandle = (void *) buffer;
// ---------------- Capture (INPUT) setup ----------------
6055 if ( mode == INPUT ) {
6057 LPDIRECTSOUNDCAPTURE input;
6058 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
6059 if ( FAILED( result ) ) {
6060 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
6061 errorText_ = errorStream_.str();
6066 inCaps.dwSize = sizeof( inCaps );
6067 result = input->GetCaps( &inCaps );
6068 if ( FAILED( result ) ) {
6070 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
6071 errorText_ = errorStream_.str();
6075 // Check channel information.
6076 if ( inCaps.dwChannels < channels + firstChannel ) {
6077 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
6081 // Check format information. Use 16-bit format unless user
// requests 8-bit AND the device advertises a matching 8-bit WAVE format.
6083 DWORD deviceFormats;
6084 if ( channels + firstChannel == 2 ) {
6085 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
6086 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6087 waveFormat.wBitsPerSample = 8;
6088 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6090 else { // assume 16-bit is supported
6091 waveFormat.wBitsPerSample = 16;
6092 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6095 else { // channel == 1
6096 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
6097 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
6098 waveFormat.wBitsPerSample = 8;
6099 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
6101 else { // assume 16-bit is supported
6102 waveFormat.wBitsPerSample = 16;
6103 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
6106 stream_.userFormat = format;
6108 // Update wave format structure and buffer information.
6109 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
6110 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
6111 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
6113 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
6114 while ( dsPointerLeadTime * 2U > dsBufferSize )
6117 // Setup the secondary DS buffer description.
6118 DSCBUFFERDESC bufferDescription;
6119 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6120 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6121 bufferDescription.dwFlags = 0;
6122 bufferDescription.dwReserved = 0;
6123 bufferDescription.dwBufferBytes = dsBufferSize;
6124 bufferDescription.lpwfxFormat = &waveFormat;
6126 // Create the capture buffer.
6127 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6128 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6129 if ( FAILED( result ) ) {
6131 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6132 errorText_ = errorStream_.str();
6136 // Get the buffer size ... might be different from what we specified.
6138 dscbcaps.dwSize = sizeof( DSCBCAPS );
6139 result = buffer->GetCaps( &dscbcaps );
6140 if ( FAILED( result ) ) {
6143 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6144 errorText_ = errorStream_.str();
6148 dsBufferSize = dscbcaps.dwBufferBytes;
6150 // NOTE: We could have a problem here if this is a duplex stream
6151 // and the play and capture hardware buffer sizes are different
6152 // (I'm actually not sure if that is a problem or not).
6153 // Currently, we are not verifying that.
6155 // Lock the capture buffer
6158 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6159 if ( FAILED( result ) ) {
6162 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6163 errorText_ = errorStream_.str();
// Zero the freshly-created capture buffer, then release the lock.
6168 ZeroMemory( audioPtr, dataLen );
6170 // Unlock the buffer
6171 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6172 if ( FAILED( result ) ) {
6175 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6176 errorText_ = errorStream_.str();
6180 ohandle = (void *) input;
6181 bhandle = (void *) buffer;
// ---------------- Common stream bookkeeping (both modes) ----------------
6184 // Set various stream parameters
6185 DsHandle *handle = 0;
6186 stream_.nDeviceChannels[mode] = channels + firstChannel;
6187 stream_.nUserChannels[mode] = channels;
6188 stream_.bufferSize = *bufferSize;
6189 stream_.channelOffset[mode] = firstChannel;
6190 stream_.deviceInterleaved[mode] = true;
6191 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6192 else stream_.userInterleaved = true;
6194 // Set flag for buffer conversion
// Conversion is needed when the user and device disagree on channel
// count, sample format, or (for multichannel) interleaving.
6195 stream_.doConvertBuffer[mode] = false;
6196 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6197 stream_.doConvertBuffer[mode] = true;
6198 if (stream_.userFormat != stream_.deviceFormat[mode])
6199 stream_.doConvertBuffer[mode] = true;
6200 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6201 stream_.nUserChannels[mode] > 1 )
6202 stream_.doConvertBuffer[mode] = true;
6204 // Allocate necessary internal buffers
6205 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6206 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6207 if ( stream_.userBuffer[mode] == NULL ) {
6208 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6212 if ( stream_.doConvertBuffer[mode] ) {
6214 bool makeBuffer = true;
6215 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6216 if ( mode == INPUT ) {
// Reuse the output-side device buffer for duplex streams when it is
// already large enough for the input side.
6217 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6218 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6219 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6224 bufferBytes *= *bufferSize;
6225 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6226 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6227 if ( stream_.deviceBuffer == NULL ) {
6228 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6234 // Allocate our DsHandle structures for the stream.
// Only on the first call (apiHandle still null); the second (duplex)
// call reuses the existing handle below.
6235 if ( stream_.apiHandle == 0 ) {
6237 handle = new DsHandle;
6239 catch ( std::bad_alloc& ) {
// NOTE(review): message says "AsioHandle" but a DsHandle is allocated
// above — apparent copy/paste from the ASIO backend; confirm upstream.
6240 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6244 // Create a manual-reset event.
// Signaled by the callback thread when draining completes (see
// stopStream, which blocks on this condition).
6245 handle->condition = CreateEvent( NULL, // no security
6246 TRUE, // manual-reset
6247 FALSE, // non-signaled initially
6249 stream_.apiHandle = (void *) handle;
6252 handle = (DsHandle *) stream_.apiHandle;
6253 handle->id[mode] = ohandle;
6254 handle->buffer[mode] = bhandle;
6255 handle->dsBufferSize[mode] = dsBufferSize;
6256 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6258 stream_.device[mode] = device;
6259 stream_.state = STREAM_STOPPED;
6260 if ( stream_.mode == OUTPUT && mode == INPUT )
6261 // We had already set up an output stream.
6262 stream_.mode = DUPLEX;
6264 stream_.mode = mode;
6265 stream_.nBuffers = nBuffers;
6266 stream_.sampleRate = sampleRate;
6268 // Setup the buffer conversion information structure.
6269 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6271 // Setup the callback thread.
6272 if ( stream_.callbackInfo.isRunning == false ) {
6274 stream_.callbackInfo.isRunning = true;
6275 stream_.callbackInfo.object = (void *) this;
6276 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6277 &stream_.callbackInfo, 0, &threadId );
6278 if ( stream_.callbackInfo.thread == 0 ) {
6279 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6283 // Boost DS thread priority
6284 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
// ---------------- Error cleanup path ----------------
// Presumably reached via a goto from the failure branches above (the
// label itself is in an elided line — TODO confirm).  Releases COM
// objects, the condition event, and all allocated buffers, then marks
// the stream closed.
6290 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6291 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6292 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6293 if ( buffer ) buffer->Release();
6296 if ( handle->buffer[1] ) {
6297 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6298 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6299 if ( buffer ) buffer->Release();
6302 CloseHandle( handle->condition );
6304 stream_.apiHandle = 0;
6307 for ( int i=0; i<2; i++ ) {
6308 if ( stream_.userBuffer[i] ) {
6309 free( stream_.userBuffer[i] );
6310 stream_.userBuffer[i] = 0;
6314 if ( stream_.deviceBuffer ) {
6315 free( stream_.deviceBuffer );
6316 stream_.deviceBuffer = 0;
6319 stream_.state = STREAM_CLOSED;
// Close an open stream: stop the callback thread, release the
// DirectSound COM objects and the condition event, free the internal
// user/device buffers, and reset the stream to the CLOSED state.
// Warns (rather than erroring) if no stream is open.
// NOTE(review): the listing is elided — the Stop()/Release() calls for
// the playback and capture buffers fall in the unnumbered gaps after
// original lines 6340 and 6349.
6323 void RtApiDs :: closeStream()
6325 if ( stream_.state == STREAM_CLOSED ) {
6326 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6327 error( RtAudioError::WARNING );
6331 // Stop the callback thread.
// Clearing isRunning signals the thread's loop to exit; then join and
// release the thread handle.
6332 stream_.callbackInfo.isRunning = false;
6333 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6334 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6336 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Tear down the playback-side COM objects, if present.
6338 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6339 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6340 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Tear down the capture-side COM objects, if present.
6347 if ( handle->buffer[1] ) {
6348 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6349 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6356 CloseHandle( handle->condition );
6358 stream_.apiHandle = 0;
// Free the per-direction user buffers and the shared device buffer.
6361 for ( int i=0; i<2; i++ ) {
6362 if ( stream_.userBuffer[i] ) {
6363 free( stream_.userBuffer[i] );
6364 stream_.userBuffer[i] = 0;
6368 if ( stream_.deviceBuffer ) {
6369 free( stream_.deviceBuffer );
6370 stream_.deviceBuffer = 0;
6373 stream_.mode = UNINITIALIZED;
6374 stream_.state = STREAM_CLOSED;
// Start a stopped stream: begin looping playback and/or capture on the
// DirectSound buffers, reset drain state, and mark the stream RUNNING.
// Warns (rather than erroring) if the stream is already running.
6377 void RtApiDs :: startStream()
// NOTE(review): RtApi::startStream() is called here AND an explicit
// gettimeofday() on lastTickTimestamp appears below — if the base call
// also stamps the tick time this is redundant; confirm against RtApi.
6380 RtApi::startStream();
6381 if ( stream_.state == STREAM_RUNNING ) {
6382 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6383 error( RtAudioError::WARNING );
6387 #if defined( HAVE_GETTIMEOFDAY )
6388 gettimeofday( &stream_.lastTickTimestamp, NULL );
6391 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6393 // Increase scheduler frequency on lesser windows (a side-effect of
6394 // increasing timer accuracy). On greater windows (Win2K or later),
6395 // this is already in effect.
// Paired with timeEndPeriod( 1 ) in stopStream().
6396 timeBeginPeriod( 1 );
6398 buffersRolling = false;
6399 duplexPrerollBytes = 0;
6401 if ( stream_.mode == DUPLEX ) {
6402 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6403 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Start looping playback on the output buffer.
6407 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6409 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6410 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6411 if ( FAILED( result ) ) {
6412 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6413 errorText_ = errorStream_.str();
// Start looping capture on the input buffer.
6418 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6420 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6421 result = buffer->Start( DSCBSTART_LOOPING );
6422 if ( FAILED( result ) ) {
6423 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6424 errorText_ = errorStream_.str();
// Reset drain state and the condition event used by stopStream().
6429 handle->drainCounter = 0;
6430 handle->internalDrain = false;
6431 ResetEvent( handle->condition );
6432 stream_.state = STREAM_RUNNING;
// Escalate any DirectSound failure recorded above.
6435 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream: optionally drain the output (block on the
// handle condition until the callback signals), then Stop(), zero, and
// rewind both DirectSound buffers so a restart begins with silence.
// Warns (rather than erroring) if the stream is already stopped.
6438 void RtApiDs :: stopStream()
// NOTE(review): calling RtApi::startStream() at the top of *stop*Stream
// looks like a copy/paste of the startStream() prologue — it would
// re-stamp lastTickTimestamp while stopping.  Other RtAudio releases
// begin this function with verifyStream(); confirm against upstream.
6441 RtApi::startStream();
6442 if ( stream_.state == STREAM_STOPPED ) {
6443 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6444 error( RtAudioError::WARNING );
6451 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6452 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If no drain is in progress, request one (drainCounter = 2 makes the
// callback write zeros) and wait for the callback to signal completion.
6453 if ( handle->drainCounter == 0 ) {
6454 handle->drainCounter = 2;
6455 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6458 stream_.state = STREAM_STOPPED;
6460 MUTEX_LOCK( &stream_.mutex );
6462 // Stop the buffer and clear memory
6463 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6464 result = buffer->Stop();
6465 if ( FAILED( result ) ) {
6466 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6467 errorText_ = errorStream_.str();
6471 // Lock the buffer and clear it so that if we start to play again,
6472 // we won't have old data playing.
6473 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6474 if ( FAILED( result ) ) {
6475 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6476 errorText_ = errorStream_.str();
6480 // Zero the DS buffer
6481 ZeroMemory( audioPtr, dataLen );
6483 // Unlock the DS buffer
6484 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6485 if ( FAILED( result ) ) {
6486 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6487 errorText_ = errorStream_.str();
6491 // If we start playing again, we must begin at beginning of buffer.
6492 handle->bufferPointer[0] = 0;
// Capture side: stop, zero, and rewind the input buffer.
6495 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6496 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6500 stream_.state = STREAM_STOPPED;
// For DUPLEX the mutex was already taken in the output branch above.
6502 if ( stream_.mode != DUPLEX )
6503 MUTEX_LOCK( &stream_.mutex );
6505 result = buffer->Stop();
6506 if ( FAILED( result ) ) {
6507 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6508 errorText_ = errorStream_.str();
6512 // Lock the buffer and clear it so that if we start to play again,
6513 // we won't have old data playing.
6514 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6515 if ( FAILED( result ) ) {
6516 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6517 errorText_ = errorStream_.str();
6521 // Zero the DS buffer
6522 ZeroMemory( audioPtr, dataLen );
6524 // Unlock the DS buffer
6525 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6526 if ( FAILED( result ) ) {
6527 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6528 errorText_ = errorStream_.str();
6532 // If we start recording again, we must begin at beginning of buffer.
6533 handle->bufferPointer[1] = 0;
6537 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6538 MUTEX_UNLOCK( &stream_.mutex );
// Escalate any DirectSound failure recorded above.
6540 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream without draining: setting drainCounter to 2
// makes the callback write zeros instead of invoking the user callback.
// Warns (rather than erroring) if the stream is already stopped.
// The remainder of the routine (presumably a stopStream() call — TODO
// confirm) falls in the elided lines after original line 6553.
6543 void RtApiDs :: abortStream()
6546 if ( stream_.state == STREAM_STOPPED ) {
6547 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6548 error( RtAudioError::WARNING );
6552 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6553 handle->drainCounter = 2;
6558 void RtApiDs :: callbackEvent()
6560 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6561 Sleep( 50 ); // sleep 50 milliseconds
6565 if ( stream_.state == STREAM_CLOSED ) {
6566 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6567 error( RtAudioError::WARNING );
6571 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6572 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6574 // Check if we were draining the stream and signal is finished.
6575 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6577 stream_.state = STREAM_STOPPING;
6578 if ( handle->internalDrain == false )
6579 SetEvent( handle->condition );
6585 // Invoke user callback to get fresh output data UNLESS we are
6587 if ( handle->drainCounter == 0 ) {
6588 RtAudioCallback callback = (RtAudioCallback) info->callback;
6589 double streamTime = getStreamTime();
6590 RtAudioStreamStatus status = 0;
6591 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6592 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6593 handle->xrun[0] = false;
6595 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6596 status |= RTAUDIO_INPUT_OVERFLOW;
6597 handle->xrun[1] = false;
6599 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6600 stream_.bufferSize, streamTime, status, info->userData );
6601 if ( cbReturnValue == 2 ) {
6602 stream_.state = STREAM_STOPPING;
6603 handle->drainCounter = 2;
6607 else if ( cbReturnValue == 1 ) {
6608 handle->drainCounter = 1;
6609 handle->internalDrain = true;
6614 DWORD currentWritePointer, safeWritePointer;
6615 DWORD currentReadPointer, safeReadPointer;
6616 UINT nextWritePointer;
6618 LPVOID buffer1 = NULL;
6619 LPVOID buffer2 = NULL;
6620 DWORD bufferSize1 = 0;
6621 DWORD bufferSize2 = 0;
6626 MUTEX_LOCK( &stream_.mutex );
6627 if ( stream_.state == STREAM_STOPPED ) {
6628 MUTEX_UNLOCK( &stream_.mutex );
6632 if ( buffersRolling == false ) {
6633 if ( stream_.mode == DUPLEX ) {
6634 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6636 // It takes a while for the devices to get rolling. As a result,
6637 // there's no guarantee that the capture and write device pointers
6638 // will move in lockstep. Wait here for both devices to start
6639 // rolling, and then set our buffer pointers accordingly.
6640 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6641 // bytes later than the write buffer.
6643 // Stub: a serious risk of having a pre-emptive scheduling round
6644 // take place between the two GetCurrentPosition calls... but I'm
6645 // really not sure how to solve the problem. Temporarily boost to
6646 // Realtime priority, maybe; but I'm not sure what priority the
6647 // DirectSound service threads run at. We *should* be roughly
6648 // within a ms or so of correct.
6650 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6651 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6653 DWORD startSafeWritePointer, startSafeReadPointer;
6655 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6656 if ( FAILED( result ) ) {
6657 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6658 errorText_ = errorStream_.str();
6659 MUTEX_UNLOCK( &stream_.mutex );
6660 error( RtAudioError::SYSTEM_ERROR );
6663 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6664 if ( FAILED( result ) ) {
6665 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6666 errorText_ = errorStream_.str();
6667 MUTEX_UNLOCK( &stream_.mutex );
6668 error( RtAudioError::SYSTEM_ERROR );
6672 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6673 if ( FAILED( result ) ) {
6674 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6675 errorText_ = errorStream_.str();
6676 MUTEX_UNLOCK( &stream_.mutex );
6677 error( RtAudioError::SYSTEM_ERROR );
6680 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6681 if ( FAILED( result ) ) {
6682 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6683 errorText_ = errorStream_.str();
6684 MUTEX_UNLOCK( &stream_.mutex );
6685 error( RtAudioError::SYSTEM_ERROR );
6688 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6692 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6694 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6695 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6696 handle->bufferPointer[1] = safeReadPointer;
6698 else if ( stream_.mode == OUTPUT ) {
6700 // Set the proper nextWritePosition after initial startup.
6701 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6702 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6703 if ( FAILED( result ) ) {
6704 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6705 errorText_ = errorStream_.str();
6706 MUTEX_UNLOCK( &stream_.mutex );
6707 error( RtAudioError::SYSTEM_ERROR );
6710 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6711 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6714 buffersRolling = true;
6717 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6719 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6721 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6722 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6723 bufferBytes *= formatBytes( stream_.userFormat );
6724 memset( stream_.userBuffer[0], 0, bufferBytes );
6727 // Setup parameters and do buffer conversion if necessary.
6728 if ( stream_.doConvertBuffer[0] ) {
6729 buffer = stream_.deviceBuffer;
6730 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6731 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6732 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6735 buffer = stream_.userBuffer[0];
6736 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6737 bufferBytes *= formatBytes( stream_.userFormat );
6740 // No byte swapping necessary in DirectSound implementation.
6742 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6743 // unsigned. So, we need to convert our signed 8-bit data here to
6745 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6746 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6748 DWORD dsBufferSize = handle->dsBufferSize[0];
6749 nextWritePointer = handle->bufferPointer[0];
6751 DWORD endWrite, leadPointer;
6753 // Find out where the read and "safe write" pointers are.
6754 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6755 if ( FAILED( result ) ) {
6756 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6757 errorText_ = errorStream_.str();
6758 MUTEX_UNLOCK( &stream_.mutex );
6759 error( RtAudioError::SYSTEM_ERROR );
6763 // We will copy our output buffer into the region between
6764 // safeWritePointer and leadPointer. If leadPointer is not
6765 // beyond the next endWrite position, wait until it is.
6766 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6767 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6768 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6769 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6770 endWrite = nextWritePointer + bufferBytes;
6772 // Check whether the entire write region is behind the play pointer.
6773 if ( leadPointer >= endWrite ) break;
6775 // If we are here, then we must wait until the leadPointer advances
6776 // beyond the end of our next write region. We use the
6777 // Sleep() function to suspend operation until that happens.
6778 double millis = ( endWrite - leadPointer ) * 1000.0;
6779 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6780 if ( millis < 1.0 ) millis = 1.0;
6781 Sleep( (DWORD) millis );
6784 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6785 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6786 // We've strayed into the forbidden zone ... resync the read pointer.
6787 handle->xrun[0] = true;
6788 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6789 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6790 handle->bufferPointer[0] = nextWritePointer;
6791 endWrite = nextWritePointer + bufferBytes;
6794 // Lock free space in the buffer
6795 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6796 &bufferSize1, &buffer2, &bufferSize2, 0 );
6797 if ( FAILED( result ) ) {
6798 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6799 errorText_ = errorStream_.str();
6800 MUTEX_UNLOCK( &stream_.mutex );
6801 error( RtAudioError::SYSTEM_ERROR );
6805 // Copy our buffer into the DS buffer
6806 CopyMemory( buffer1, buffer, bufferSize1 );
6807 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6809 // Update our buffer offset and unlock sound buffer
6810 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6811 if ( FAILED( result ) ) {
6812 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6813 errorText_ = errorStream_.str();
6814 MUTEX_UNLOCK( &stream_.mutex );
6815 error( RtAudioError::SYSTEM_ERROR );
6818 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6819 handle->bufferPointer[0] = nextWritePointer;
6822 // Don't bother draining input
6823 if ( handle->drainCounter ) {
6824 handle->drainCounter++;
6828 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6830 // Setup parameters.
6831 if ( stream_.doConvertBuffer[1] ) {
6832 buffer = stream_.deviceBuffer;
6833 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6834 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6837 buffer = stream_.userBuffer[1];
6838 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6839 bufferBytes *= formatBytes( stream_.userFormat );
6842 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6843 long nextReadPointer = handle->bufferPointer[1];
6844 DWORD dsBufferSize = handle->dsBufferSize[1];
6846 // Find out where the write and "safe read" pointers are.
6847 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6848 if ( FAILED( result ) ) {
6849 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6850 errorText_ = errorStream_.str();
6851 MUTEX_UNLOCK( &stream_.mutex );
6852 error( RtAudioError::SYSTEM_ERROR );
6856 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6857 DWORD endRead = nextReadPointer + bufferBytes;
6859 // Handling depends on whether we are INPUT or DUPLEX.
6860 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6861 // then a wait here will drag the write pointers into the forbidden zone.
6863 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6864 // it's in a safe position. This causes dropouts, but it seems to be the only
6865 // practical way to sync up the read and write pointers reliably, given the
6866 very complex relationship between phase and increment of the read and write
6869 // In order to minimize audible dropouts in DUPLEX mode, we will
6870 // provide a pre-roll period of 0.5 seconds in which we return
6871 // zeros from the read buffer while the pointers sync up.
6873 if ( stream_.mode == DUPLEX ) {
6874 if ( safeReadPointer < endRead ) {
6875 if ( duplexPrerollBytes <= 0 ) {
6876 // Pre-roll time over. Be more aggressive.
6877 int adjustment = endRead-safeReadPointer;
6879 handle->xrun[1] = true;
6881 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6882 // and perform fine adjustments later.
6883 // - small adjustments: back off by twice as much.
6884 if ( adjustment >= 2*bufferBytes )
6885 nextReadPointer = safeReadPointer-2*bufferBytes;
6887 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6889 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6893 // In pre-roll time. Just do it.
6894 nextReadPointer = safeReadPointer - bufferBytes;
6895 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6897 endRead = nextReadPointer + bufferBytes;
6900 else { // mode == INPUT
6901 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6902 // See comments for playback.
6903 double millis = (endRead - safeReadPointer) * 1000.0;
6904 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6905 if ( millis < 1.0 ) millis = 1.0;
6906 Sleep( (DWORD) millis );
6908 // Wake up and find out where we are now.
6909 result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6910 if ( FAILED( result ) ) {
6911 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6912 errorText_ = errorStream_.str();
6913 MUTEX_UNLOCK( &stream_.mutex );
6914 error( RtAudioError::SYSTEM_ERROR );
6918 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6922 // Lock free space in the buffer
6923 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6924 &bufferSize1, &buffer2, &bufferSize2, 0 );
6925 if ( FAILED( result ) ) {
6926 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6927 errorText_ = errorStream_.str();
6928 MUTEX_UNLOCK( &stream_.mutex );
6929 error( RtAudioError::SYSTEM_ERROR );
6933 if ( duplexPrerollBytes <= 0 ) {
6934 // Copy our buffer into the DS buffer
6935 CopyMemory( buffer, buffer1, bufferSize1 );
6936 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6939 memset( buffer, 0, bufferSize1 );
6940 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6941 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6944 // Update our buffer offset and unlock sound buffer
6945 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6946 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6947 if ( FAILED( result ) ) {
6948 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6949 errorText_ = errorStream_.str();
6950 MUTEX_UNLOCK( &stream_.mutex );
6951 error( RtAudioError::SYSTEM_ERROR );
6954 handle->bufferPointer[1] = nextReadPointer;
6956 // No byte swapping necessary in DirectSound implementation.
6958 // If necessary, convert 8-bit data from unsigned to signed.
6959 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6960 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6962 // Do buffer conversion if necessary.
6963 if ( stream_.doConvertBuffer[1] )
6964 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6968 MUTEX_UNLOCK( &stream_.mutex );
6969 RtApi::tickStreamTime();
6972 // Definitions for utility functions and callbacks
6973 // specific to the DirectSound implementation.
// Thread entry point for the DirectSound callback thread.  The
// CallbackInfo passed at thread creation carries the RtApiDs object,
// whose callbackEvent() is pumped until the stream is stopped.
// NOTE(review): the function tail (thread-exit/return and closing
// brace) falls on lines not visible in this extract.
6975 static unsigned __stdcall callbackHandler( void *ptr )
6977 CallbackInfo *info = (CallbackInfo *) ptr;
6978 RtApiDs *object = (RtApiDs *) info->object;
// isRunning aliases the flag inside CallbackInfo; the loop below exits
// when some other code path (presumably stop/close) clears it.
6979 bool* isRunning = &info->isRunning;
6981 while ( *isRunning == true ) {
6982 object->callbackEvent();
// DirectSound device-enumeration callback: validates one enumerated
// device and records it in the shared DsDevice list.  The context
// pointer (lpContext, declared on a line not visible here) carries a
// DsProbeData holding the probe direction (isInput) and the device
// vector.  Returning TRUE continues enumeration.
6989 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6990 LPCTSTR description,
6994 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6995 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
// Validate the device by opening it and querying its capabilities.
6998 bool validDevice = false;
6999 if ( probeInfo.isInput == true ) {
7001 LPDIRECTSOUNDCAPTURE object;
7003 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
// A device that will not open is skipped, but enumeration continues.
7004 if ( hr != DS_OK ) return TRUE;
7006 caps.dwSize = sizeof(caps);
7007 hr = object->GetCaps( &caps );
7008 if ( hr == DS_OK ) {
// Capture device is valid if it reports channels and formats.
7009 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
7016 LPDIRECTSOUND object;
7017 hr = DirectSoundCreate( lpguid, &object, NULL );
7018 if ( hr != DS_OK ) return TRUE;
7020 caps.dwSize = sizeof(caps);
7021 hr = object->GetCaps( &caps );
7022 if ( hr == DS_OK ) {
// Output device is valid if it has a mono or stereo primary buffer.
7023 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
7029 // If good device, then save its name and guid.
7030 std::string name = convertCharPointerToStdString( description );
7031 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL guid denotes the system default device.
7032 if ( lpguid == NULL )
7033 name = "Default Device";
7034 if ( validDevice ) {
// If a device of the same name was already recorded (e.g. from the
// other direction's enumeration), merge: store this direction's GUID
// in the existing entry.  Index convention per the assignments below:
// id[0]/validId[0] = output, id[1]/validId[1] = input.
7035 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
7036 if ( dsDevices[i].name == name ) {
7037 dsDevices[i].found = true;
7038 if ( probeInfo.isInput ) {
7039 dsDevices[i].id[1] = lpguid;
7040 dsDevices[i].validId[1] = true;
7043 dsDevices[i].id[0] = lpguid;
7044 dsDevices[i].validId[0] = true;
// Otherwise append a fresh entry for this device.
7052 device.found = true;
7053 if ( probeInfo.isInput ) {
7054 device.id[1] = lpguid;
7055 device.validId[1] = true;
7058 device.id[0] = lpguid;
7059 device.validId[0] = true;
7061 dsDevices.push_back( device );
// Map a DirectSound HRESULT error code to a human-readable string for
// use in RtApiDs error messages.  NOTE(review): the switch header, the
// case labels preceding the "Generic error" and "Already initialized"
// returns (presumably DSERR_GENERIC and DSERR_NODRIVER), and the
// default label fall on lines not visible in this extract.
7067 static const char* getErrorString( int code )
7071 case DSERR_ALLOCATED:
7072 return "Already allocated";
7074 case DSERR_CONTROLUNAVAIL:
7075 return "Control unavailable";
7077 case DSERR_INVALIDPARAM:
7078 return "Invalid parameter";
7080 case DSERR_INVALIDCALL:
7081 return "Invalid call";
7084 return "Generic error";
7086 case DSERR_PRIOLEVELNEEDED:
7087 return "Priority level needed";
7089 case DSERR_OUTOFMEMORY:
7090 return "Out of memory";
7092 case DSERR_BADFORMAT:
7093 return "The sample rate or the channel format is not supported";
7095 case DSERR_UNSUPPORTED:
7096 return "Not supported";
7098 case DSERR_NODRIVER:
7101 case DSERR_ALREADYINITIALIZED:
7102 return "Already initialized";
7104 case DSERR_NOAGGREGATION:
7105 return "No aggregation";
7107 case DSERR_BUFFERLOST:
7108 return "Buffer lost";
7110 case DSERR_OTHERAPPHASPRIO:
7111 return "Another application already has priority";
7113 case DSERR_UNINITIALIZED:
7114 return "Uninitialized";
// Fallback for any code not handled above.
7117 return "DirectSound unknown error";
7120 //******************** End of __WINDOWS_DS__ *********************//
7124 #if defined(__LINUX_ALSA__)
7126 #include <alsa/asoundlib.h>
7129 // A structure to hold various information related to the ALSA API
// NOTE(review): the struct header and several members (e.g. the xrun,
// synchronized and runnable flags used by the constructor below) fall
// on lines not visible in this extract.
// PCM handles — presumably [0] = playback, [1] = capture, matching the
// two-direction indexing used throughout this file; confirm.
7132 snd_pcm_t *handles[2];
// Condition variable signalled to wake the callback thread when the
// stream becomes runnable.
7135 pthread_cond_t runnable_cv;
// Constructor initializer list: start unsynchronized, not runnable,
// and with no xrun flagged in either direction.
7139 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
7142 static void *alsaCallbackHandler( void * ptr );
// Default constructor: stream state is set up lazily when a stream is
// opened, so there is no work to do here.
7144 RtApiAlsa :: RtApiAlsa()
7146 // Nothing to do here.
// Destructor: ensure any open stream is closed (releasing its ALSA
// handles and buffers) before the API object is destroyed.
7149 RtApiAlsa :: ~RtApiAlsa()
7151 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count the available ALSA PCM devices: walk every sound card with
// snd_card_next(), enumerate each card's PCM devices via the control
// interface, then additionally check whether a "default" device can be
// opened.  Control-open failures are reported as warnings and the card
// is skipped.  NOTE(review): the 'name' buffer declaration, loop
// braces, the nDevices increments and the final return fall on lines
// not visible in this extract.
7154 unsigned int RtApiAlsa :: getDeviceCount( void )
7156 unsigned nDevices = 0;
7157 int result, subdevice, card;
7159 snd_ctl_t *handle = 0;
7161 // Count cards and devices
// Iterate sound cards; the loop ends when snd_card_next() reports no
// more cards (card < 0).
7163 snd_card_next( &card );
7164 while ( card >= 0 ) {
7165 sprintf( name, "hw:%d", card );
7166 result = snd_ctl_open( &handle, name, 0 );
7169 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7170 errorText_ = errorStream_.str();
7171 error( RtAudioError::WARNING );
// Walk this card's PCM devices; subdevice < 0 marks the end.
7176 result = snd_ctl_pcm_next_device( handle, &subdevice );
7178 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7179 errorText_ = errorStream_.str();
7180 error( RtAudioError::WARNING );
7183 if ( subdevice < 0 )
7189 snd_ctl_close( handle );
7190 snd_card_next( &card );
// Also count the "default" device if it can be opened.
7193 result = snd_ctl_open( &handle, "default", 0 );
7196 snd_ctl_close( handle );
// Probe a single ALSA PCM device (by RtAudio device index) and fill an
// RtAudio::DeviceInfo with its channel counts, supported sample rates
// and native data formats.  Failures along the way are reported as
// WARNINGs and an incompletely-probed info (info.probed left false
// where applicable) is returned.
// FIX(review): restored the address-of operator in
// snd_pcm_hw_params_alloca(&params) below — the text had been
// entity-mangled to "¶ms" ("&para;" + "ms"), which is not valid C++.
// NOTE(review): several original lines (closing braces, declarations of
// 'name'/'phandle'/'value'/'cardname', the probeParameters label) fall
// on lines not visible in this extract.
7202 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7204 RtAudio::DeviceInfo info;
7205 info.probed = false;
7207 unsigned nDevices = 0;
7208 int result, subdevice, card;
7210 snd_ctl_t *chandle = 0;
7212 // Count cards and devices
// Walk all cards/subdevices (same enumeration order as
// getDeviceCount()) until the requested index is reached; 'name' ends
// up holding "hw:card,subdevice" for the target device.
7215 snd_card_next( &card );
7216 while ( card >= 0 ) {
7217 sprintf( name, "hw:%d", card );
7218 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7221 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7222 errorText_ = errorStream_.str();
7223 error( RtAudioError::WARNING );
7228 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7230 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7231 errorText_ = errorStream_.str();
7232 error( RtAudioError::WARNING );
7235 if ( subdevice < 0 ) break;
7236 if ( nDevices == device ) {
7237 sprintf( name, "hw:%d,%d", card, subdevice );
7244 snd_ctl_close( chandle );
7245 snd_card_next( &card );
// The "default" device is counted after all hardware devices.
7248 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7249 if ( result == 0 ) {
7250 if ( nDevices == device ) {
7251 strcpy( name, "default" );
7257 if ( nDevices == 0 ) {
7258 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7259 error( RtAudioError::INVALID_USE );
7263 if ( device >= nDevices ) {
7264 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7265 error( RtAudioError::INVALID_USE );
7271 // If a stream is already open, we cannot probe the stream devices.
7272 // Thus, use the saved results.
7273 if ( stream_.state != STREAM_CLOSED &&
7274 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7275 snd_ctl_close( chandle );
7276 if ( device >= devices_.size() ) {
7277 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7278 error( RtAudioError::WARNING );
7281 return devices_[ device ];
// Stack-allocate the ALSA info/parameter structures used below.
7284 int openMode = SND_PCM_ASYNC;
7285 snd_pcm_stream_t stream;
7286 snd_pcm_info_t *pcminfo;
7287 snd_pcm_info_alloca( &pcminfo );
7289 snd_pcm_hw_params_t *params;
7290 snd_pcm_hw_params_alloca( &params );
7292 // First try for playback unless default device (which has subdev -1)
7293 stream = SND_PCM_STREAM_PLAYBACK;
7294 snd_pcm_info_set_stream( pcminfo, stream );
7295 if ( subdevice != -1 ) {
7296 snd_pcm_info_set_device( pcminfo, subdevice );
7297 snd_pcm_info_set_subdevice( pcminfo, 0 );
7299 result = snd_ctl_pcm_info( chandle, pcminfo );
7301 // Device probably doesn't support playback.
// Open non-blocking so a busy device fails fast instead of hanging.
7306 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7308 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7309 errorText_ = errorStream_.str();
7310 error( RtAudioError::WARNING );
7314 // The device is open ... fill the parameter structure.
7315 result = snd_pcm_hw_params_any( phandle, params );
7317 snd_pcm_close( phandle );
7318 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7319 errorText_ = errorStream_.str();
7320 error( RtAudioError::WARNING );
7324 // Get output channel information.
7326 result = snd_pcm_hw_params_get_channels_max( params, &value );
7328 snd_pcm_close( phandle );
7329 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7330 errorText_ = errorStream_.str();
7331 error( RtAudioError::WARNING );
7334 info.outputChannels = value;
7335 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7338 stream = SND_PCM_STREAM_CAPTURE;
7339 snd_pcm_info_set_stream( pcminfo, stream );
7341 // Now try for capture unless default device (with subdev = -1)
7342 if ( subdevice != -1 ) {
7343 result = snd_ctl_pcm_info( chandle, pcminfo );
7344 snd_ctl_close( chandle );
7346 // Device probably doesn't support capture.
7347 if ( info.outputChannels == 0 ) return info;
7348 goto probeParameters;
7352 snd_ctl_close( chandle );
7354 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7356 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7357 errorText_ = errorStream_.str();
7358 error( RtAudioError::WARNING );
7359 if ( info.outputChannels == 0 ) return info;
7360 goto probeParameters;
7363 // The device is open ... fill the parameter structure.
7364 result = snd_pcm_hw_params_any( phandle, params );
7366 snd_pcm_close( phandle );
7367 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7368 errorText_ = errorStream_.str();
7369 error( RtAudioError::WARNING );
7370 if ( info.outputChannels == 0 ) return info;
7371 goto probeParameters;
7374 result = snd_pcm_hw_params_get_channels_max( params, &value );
7376 snd_pcm_close( phandle );
7377 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7378 errorText_ = errorStream_.str();
7379 error( RtAudioError::WARNING );
7380 if ( info.outputChannels == 0 ) return info;
7381 goto probeParameters;
7383 info.inputChannels = value;
7384 snd_pcm_close( phandle );
7386 // If device opens for both playback and capture, we determine the channels.
7387 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7388 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7390 // ALSA doesn't provide default devices so we'll use the first available one.
7391 if ( device == 0 && info.outputChannels > 0 )
7392 info.isDefaultOutput = true;
7393 if ( device == 0 && info.inputChannels > 0 )
7394 info.isDefaultInput = true;
7397 // At this point, we just need to figure out the supported data
7398 // formats and sample rates. We'll proceed by opening the device in
7399 // the direction with the maximum number of channels, or playback if
7400 // they are equal. This might limit our sample rate options, but so
7403 if ( info.outputChannels >= info.inputChannels )
7404 stream = SND_PCM_STREAM_PLAYBACK;
7406 stream = SND_PCM_STREAM_CAPTURE;
7407 snd_pcm_info_set_stream( pcminfo, stream );
7409 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7411 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7412 errorText_ = errorStream_.str();
7413 error( RtAudioError::WARNING );
7417 // The device is open ... fill the parameter structure.
7418 result = snd_pcm_hw_params_any( phandle, params );
7420 snd_pcm_close( phandle );
7421 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7422 errorText_ = errorStream_.str();
7423 error( RtAudioError::WARNING );
7427 // Test our discrete set of sample rate values.
// preferredSampleRate ends up as the highest supported rate <= 48 kHz,
// or the first supported rate found if none is <= 48 kHz.
7428 info.sampleRates.clear();
7429 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7430 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7431 info.sampleRates.push_back( SAMPLE_RATES[i] );
7433 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7434 info.preferredSampleRate = SAMPLE_RATES[i];
7437 if ( info.sampleRates.size() == 0 ) {
7438 snd_pcm_close( phandle );
7439 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7440 errorText_ = errorStream_.str();
7441 error( RtAudioError::WARNING );
7445 // Probe the supported data formats ... we don't care about endian-ness just yet
7446 snd_pcm_format_t format;
7447 info.nativeFormats = 0;
7448 format = SND_PCM_FORMAT_S8;
7449 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7450 info.nativeFormats |= RTAUDIO_SINT8;
7451 format = SND_PCM_FORMAT_S16;
7452 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7453 info.nativeFormats |= RTAUDIO_SINT16;
7454 format = SND_PCM_FORMAT_S24;
7455 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7456 info.nativeFormats |= RTAUDIO_SINT24;
7457 format = SND_PCM_FORMAT_S32;
7458 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7459 info.nativeFormats |= RTAUDIO_SINT32;
7460 format = SND_PCM_FORMAT_FLOAT;
7461 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7462 info.nativeFormats |= RTAUDIO_FLOAT32;
7463 format = SND_PCM_FORMAT_FLOAT64;
7464 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7465 info.nativeFormats |= RTAUDIO_FLOAT64;
7467 // Check that we have at least one supported format
7468 if ( info.nativeFormats == 0 ) {
7469 snd_pcm_close( phandle );
7470 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7471 errorText_ = errorStream_.str();
7472 error( RtAudioError::WARNING );
7476 // Get the device name
7478 result = snd_card_get_name( card, &cardname );
7479 if ( result >= 0 ) {
7480 sprintf( name, "hw:%s,%d", cardname, subdevice );
7485 // That's all ... close the device and return
7486 snd_pcm_close( phandle );
// Snapshot DeviceInfo for every device into devices_.  Per the probe
// logic in this file, getDeviceInfo() cannot probe a device that is
// already open, so this cache is filled before a stream is opened and
// served from devices_ afterwards.
7491 void RtApiAlsa :: saveDeviceInfo( void )
7495 unsigned int nDevices = getDeviceCount();
7496 devices_.resize( nDevices );
7497 for ( unsigned int i=0; i<nDevices; i++ )
7498 devices_[i] = getDeviceInfo( i );
7501 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7502 unsigned int firstChannel, unsigned int sampleRate,
7503 RtAudioFormat format, unsigned int *bufferSize,
7504 RtAudio::StreamOptions *options )
7507 #if defined(__RTAUDIO_DEBUG__)
7509 snd_output_stdio_attach(&out, stderr, 0);
7512 // I'm not using the "plug" interface ... too much inconsistent behavior.
7514 unsigned nDevices = 0;
7515 int result, subdevice, card;
7519 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7520 snprintf(name, sizeof(name), "%s", "default");
7522 // Count cards and devices
7524 snd_card_next( &card );
7525 while ( card >= 0 ) {
7526 sprintf( name, "hw:%d", card );
7527 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7529 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7530 errorText_ = errorStream_.str();
7535 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7536 if ( result < 0 ) break;
7537 if ( subdevice < 0 ) break;
7538 if ( nDevices == device ) {
7539 sprintf( name, "hw:%d,%d", card, subdevice );
7540 snd_ctl_close( chandle );
7545 snd_ctl_close( chandle );
7546 snd_card_next( &card );
7549 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7550 if ( result == 0 ) {
7551 if ( nDevices == device ) {
7552 strcpy( name, "default" );
7553 snd_ctl_close( chandle );
7558 snd_ctl_close( chandle );
7560 if ( nDevices == 0 ) {
7561 // This should not happen because a check is made before this function is called.
7562 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7566 if ( device >= nDevices ) {
7567 // This should not happen because a check is made before this function is called.
7568 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7575 // The getDeviceInfo() function will not work for a device that is
7576 // already open. Thus, we'll probe the system before opening a
7577 // stream and save the results for use by getDeviceInfo().
7578 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7579 this->saveDeviceInfo();
7581 snd_pcm_stream_t stream;
7582 if ( mode == OUTPUT )
7583 stream = SND_PCM_STREAM_PLAYBACK;
7585 stream = SND_PCM_STREAM_CAPTURE;
7588 int openMode = SND_PCM_ASYNC;
7589 result = snd_pcm_open( &phandle, name, stream, openMode );
7591 if ( mode == OUTPUT )
7592 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7594 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7595 errorText_ = errorStream_.str();
7599 // Fill the parameter structure.
7600 snd_pcm_hw_params_t *hw_params;
7601 snd_pcm_hw_params_alloca( &hw_params );
7602 result = snd_pcm_hw_params_any( phandle, hw_params );
7604 snd_pcm_close( phandle );
7605 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7606 errorText_ = errorStream_.str();
7610 #if defined(__RTAUDIO_DEBUG__)
7611 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7612 snd_pcm_hw_params_dump( hw_params, out );
7615 // Set access ... check user preference.
7616 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7617 stream_.userInterleaved = false;
7618 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7620 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7621 stream_.deviceInterleaved[mode] = true;
7624 stream_.deviceInterleaved[mode] = false;
7627 stream_.userInterleaved = true;
7628 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7630 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7631 stream_.deviceInterleaved[mode] = false;
7634 stream_.deviceInterleaved[mode] = true;
7638 snd_pcm_close( phandle );
7639 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7640 errorText_ = errorStream_.str();
7644 // Determine how to set the device format.
7645 stream_.userFormat = format;
7646 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7648 if ( format == RTAUDIO_SINT8 )
7649 deviceFormat = SND_PCM_FORMAT_S8;
7650 else if ( format == RTAUDIO_SINT16 )
7651 deviceFormat = SND_PCM_FORMAT_S16;
7652 else if ( format == RTAUDIO_SINT24 )
7653 deviceFormat = SND_PCM_FORMAT_S24;
7654 else if ( format == RTAUDIO_SINT32 )
7655 deviceFormat = SND_PCM_FORMAT_S32;
7656 else if ( format == RTAUDIO_FLOAT32 )
7657 deviceFormat = SND_PCM_FORMAT_FLOAT;
7658 else if ( format == RTAUDIO_FLOAT64 )
7659 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7661 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7662 stream_.deviceFormat[mode] = format;
7666 // The user requested format is not natively supported by the device.
7667 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7668 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7669 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7673 deviceFormat = SND_PCM_FORMAT_FLOAT;
7674 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7675 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7679 deviceFormat = SND_PCM_FORMAT_S32;
7680 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7681 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7685 deviceFormat = SND_PCM_FORMAT_S24;
7686 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7687 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7691 deviceFormat = SND_PCM_FORMAT_S16;
7692 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7693 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7697 deviceFormat = SND_PCM_FORMAT_S8;
7698 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7699 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7703 // If we get here, no supported format was found.
7704 snd_pcm_close( phandle );
7705 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7706 errorText_ = errorStream_.str();
7710 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7712 snd_pcm_close( phandle );
7713 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7714 errorText_ = errorStream_.str();
7718 // Determine whether byte-swaping is necessary.
7719 stream_.doByteSwap[mode] = false;
7720 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7721 result = snd_pcm_format_cpu_endian( deviceFormat );
7723 stream_.doByteSwap[mode] = true;
7724 else if (result < 0) {
7725 snd_pcm_close( phandle );
7726 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7727 errorText_ = errorStream_.str();
7732 // Set the sample rate.
7733 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7735 snd_pcm_close( phandle );
7736 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7737 errorText_ = errorStream_.str();
7741 // Determine the number of channels for this device. We support a possible
7742 // minimum device channel number > than the value requested by the user.
7743 stream_.nUserChannels[mode] = channels;
7745 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7746 unsigned int deviceChannels = value;
7747 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7748 snd_pcm_close( phandle );
7749 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7750 errorText_ = errorStream_.str();
7754 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7756 snd_pcm_close( phandle );
7757 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7758 errorText_ = errorStream_.str();
7761 deviceChannels = value;
7762 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7763 stream_.nDeviceChannels[mode] = deviceChannels;
7765 // Set the device channels.
7766 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7768 snd_pcm_close( phandle );
7769 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7770 errorText_ = errorStream_.str();
7774 // Set the buffer (or period) size.
7776 snd_pcm_uframes_t periodSize = *bufferSize;
7777 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7779 snd_pcm_close( phandle );
7780 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7781 errorText_ = errorStream_.str();
7784 *bufferSize = periodSize;
7786 // Set the buffer number, which in ALSA is referred to as the "period".
7787 unsigned int periods = 0;
7788 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7789 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7790 if ( periods < 2 ) periods = 4; // a fairly safe default value
7791 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7793 snd_pcm_close( phandle );
7794 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7795 errorText_ = errorStream_.str();
7799 // If attempting to setup a duplex stream, the bufferSize parameter
7800 // MUST be the same in both directions!
7801 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7802 snd_pcm_close( phandle );
7803 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7804 errorText_ = errorStream_.str();
7808 stream_.bufferSize = *bufferSize;
7810 // Install the hardware configuration
7811 result = snd_pcm_hw_params( phandle, hw_params );
7813 snd_pcm_close( phandle );
7814 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7815 errorText_ = errorStream_.str();
7819 #if defined(__RTAUDIO_DEBUG__)
7820 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7821 snd_pcm_hw_params_dump( hw_params, out );
7824 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7825 snd_pcm_sw_params_t *sw_params = NULL;
7826 snd_pcm_sw_params_alloca( &sw_params );
7827 snd_pcm_sw_params_current( phandle, sw_params );
7828 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7829 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7830 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7832 // The following two settings were suggested by Theo Veenker
7833 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7834 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7836 // here are two options for a fix
7837 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7838 snd_pcm_uframes_t val;
7839 snd_pcm_sw_params_get_boundary( sw_params, &val );
7840 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7842 result = snd_pcm_sw_params( phandle, sw_params );
7844 snd_pcm_close( phandle );
7845 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7846 errorText_ = errorStream_.str();
7850 #if defined(__RTAUDIO_DEBUG__)
7851 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7852 snd_pcm_sw_params_dump( sw_params, out );
7855 // Set flags for buffer conversion
7856 stream_.doConvertBuffer[mode] = false;
7857 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7858 stream_.doConvertBuffer[mode] = true;
7859 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7860 stream_.doConvertBuffer[mode] = true;
7861 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7862 stream_.nUserChannels[mode] > 1 )
7863 stream_.doConvertBuffer[mode] = true;
7865 // Allocate the ApiHandle if necessary and then save.
7866 AlsaHandle *apiInfo = 0;
7867 if ( stream_.apiHandle == 0 ) {
7869 apiInfo = (AlsaHandle *) new AlsaHandle;
7871 catch ( std::bad_alloc& ) {
7872 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7876 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7877 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7881 stream_.apiHandle = (void *) apiInfo;
7882 apiInfo->handles[0] = 0;
7883 apiInfo->handles[1] = 0;
7886 apiInfo = (AlsaHandle *) stream_.apiHandle;
7888 apiInfo->handles[mode] = phandle;
7891 // Allocate necessary internal buffers.
7892 unsigned long bufferBytes;
7893 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7894 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7895 if ( stream_.userBuffer[mode] == NULL ) {
7896 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7900 if ( stream_.doConvertBuffer[mode] ) {
7902 bool makeBuffer = true;
7903 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7904 if ( mode == INPUT ) {
7905 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7906 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7907 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7912 bufferBytes *= *bufferSize;
7913 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7914 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7915 if ( stream_.deviceBuffer == NULL ) {
7916 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7922 stream_.sampleRate = sampleRate;
7923 stream_.nBuffers = periods;
7924 stream_.device[mode] = device;
7925 stream_.state = STREAM_STOPPED;
7927 // Setup the buffer conversion information structure.
7928 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7930 // Setup thread if necessary.
7931 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7932 // We had already set up an output stream.
7933 stream_.mode = DUPLEX;
7934 // Link the streams if possible.
7935 apiInfo->synchronized = false;
7936 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7937 apiInfo->synchronized = true;
7939 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7940 error( RtAudioError::WARNING );
7944 stream_.mode = mode;
7946 // Setup callback thread.
7947 stream_.callbackInfo.object = (void *) this;
7949 // Set the thread attributes for joinable and realtime scheduling
7950 // priority (optional). The higher priority will only take affect
7951 // if the program is run as root or suid. Note, under Linux
7952 // processes with CAP_SYS_NICE privilege, a user can change
7953 // scheduling policy and priority (thus need not be root). See
7954 // POSIX "capabilities".
7955 pthread_attr_t attr;
7956 pthread_attr_init( &attr );
7957 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7958 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
7959 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7960 stream_.callbackInfo.doRealtime = true;
7961 struct sched_param param;
7962 int priority = options->priority;
7963 int min = sched_get_priority_min( SCHED_RR );
7964 int max = sched_get_priority_max( SCHED_RR );
7965 if ( priority < min ) priority = min;
7966 else if ( priority > max ) priority = max;
7967 param.sched_priority = priority;
7969 // Set the policy BEFORE the priority. Otherwise it fails.
7970 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7971 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7972 // This is definitely required. Otherwise it fails.
7973 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7974 pthread_attr_setschedparam(&attr, ¶m);
7977 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7979 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7982 stream_.callbackInfo.isRunning = true;
7983 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7984 pthread_attr_destroy( &attr );
7986 // Failed. Try instead with default attributes.
7987 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7989 stream_.callbackInfo.isRunning = false;
7990 errorText_ = "RtApiAlsa::error creating callback thread!";
8000 pthread_cond_destroy( &apiInfo->runnable_cv );
8001 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8002 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8004 stream_.apiHandle = 0;
8007 if ( phandle) snd_pcm_close( phandle );
8009 for ( int i=0; i<2; i++ ) {
8010 if ( stream_.userBuffer[i] ) {
8011 free( stream_.userBuffer[i] );
8012 stream_.userBuffer[i] = 0;
8016 if ( stream_.deviceBuffer ) {
8017 free( stream_.deviceBuffer );
8018 stream_.deviceBuffer = 0;
8021 stream_.state = STREAM_CLOSED;
// Shut down an open ALSA stream: terminate the callback thread, stop any
// active PCM activity, close both PCM handles, and free all internal buffers.
// NOTE(review): some brace/return lines are not visible in this extract;
// comments below describe only the visible statements.
8025 void RtApiAlsa :: closeStream()
8027 if ( stream_.state == STREAM_CLOSED ) {
8028 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
8029 error( RtAudioError::WARNING );
// Tell the callback thread to exit; if it is parked on the runnable
// condition variable (stream stopped), wake it so it can observe
// isRunning == false and terminate, then join it.
8033 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8034 stream_.callbackInfo.isRunning = false;
8035 MUTEX_LOCK( &stream_.mutex );
8036 if ( stream_.state == STREAM_STOPPED ) {
8037 apiInfo->runnable = true;
8038 pthread_cond_signal( &apiInfo->runnable_cv );
8040 MUTEX_UNLOCK( &stream_.mutex );
8041 pthread_join( stream_.callbackInfo.thread, NULL );
// If the stream is still running, abort immediately: snd_pcm_drop()
// discards pending frames rather than draining them.
8043 if ( stream_.state == STREAM_RUNNING ) {
8044 stream_.state = STREAM_STOPPED;
8045 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
8046 snd_pcm_drop( apiInfo->handles[0] );
8047 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
8048 snd_pcm_drop( apiInfo->handles[1] );
// Release the per-stream API handle: condition variable and any open
// PCM handles (handles[0] = playback, handles[1] = capture).
8052 pthread_cond_destroy( &apiInfo->runnable_cv );
8053 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
8054 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
8056 stream_.apiHandle = 0;
// Free the user-side buffers for both directions.
8059 for ( int i=0; i<2; i++ ) {
8060 if ( stream_.userBuffer[i] ) {
8061 free( stream_.userBuffer[i] );
8062 stream_.userBuffer[i] = 0;
// Free the shared device-format conversion buffer, if one was allocated.
8066 if ( stream_.deviceBuffer ) {
8067 free( stream_.deviceBuffer );
8068 stream_.deviceBuffer = 0;
// Reset stream bookkeeping to the closed state.
8071 stream_.mode = UNINITIALIZED;
8072 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM device(s) if needed and
// wake the callback thread via the runnable condition variable.
8075 void RtApiAlsa :: startStream()
8077 // This method calls snd_pcm_prepare if the device isn't already in that state.
8080 RtApi::startStream();
8081 if ( stream_.state == STREAM_RUNNING ) {
8082 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
8083 error( RtAudioError::WARNING );
8087 MUTEX_LOCK( &stream_.mutex );
// Record the start time so stream-time accounting has a reference point.
8089 #if defined( HAVE_GETTIMEOFDAY )
8090 gettimeofday( &stream_.lastTickTimestamp, NULL );
8094 snd_pcm_state_t state;
8095 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8096 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Prepare the playback handle (handles[0]) unless it is already prepared.
8097 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8098 state = snd_pcm_state( handle[0] );
8099 if ( state != SND_PCM_STATE_PREPARED ) {
8100 result = snd_pcm_prepare( handle[0] );
8102 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
8103 errorText_ = errorStream_.str();
// Prepare the capture handle (handles[1]); when the handles are linked
// (synchronized), starting playback starts capture too, so skip it here.
8109 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8110 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
8111 state = snd_pcm_state( handle[1] );
8112 if ( state != SND_PCM_STATE_PREPARED ) {
8113 result = snd_pcm_prepare( handle[1] );
8115 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
8116 errorText_ = errorStream_.str();
8122 stream_.state = STREAM_RUNNING;
// Wake the callback thread, which waits on runnable_cv while stopped.
8125 apiInfo->runnable = true;
8126 pthread_cond_signal( &apiInfo->runnable_cv );
8127 MUTEX_UNLOCK( &stream_.mutex );
8129 if ( result >= 0 ) return;
8130 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully: drain pending output frames (unless the
// handles are linked, in which case drop is used), then drop input.
8133 void RtApiAlsa :: stopStream()
8136 if ( stream_.state == STREAM_STOPPED ) {
8137 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8138 error( RtAudioError::WARNING );
// Flip the state before taking the mutex so the callback thread sees it.
8142 stream_.state = STREAM_STOPPED;
8143 MUTEX_LOCK( &stream_.mutex );
8146 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8147 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8148 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// For linked (synchronized) handles drop immediately; draining one side
// of a linked pair could block. Otherwise drain so queued audio plays out.
8149 if ( apiInfo->synchronized )
8150 result = snd_pcm_drop( handle[0] );
8152 result = snd_pcm_drain( handle[0] );
8154 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8155 errorText_ = errorStream_.str();
8160 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8161 result = snd_pcm_drop( handle[1] );
8163 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8164 errorText_ = errorStream_.str();
// Park the callback thread on its condition variable while stopped.
8170 apiInfo->runnable = false; // fixes high CPU usage when stopped
8171 MUTEX_UNLOCK( &stream_.mutex );
8173 if ( result >= 0 ) return;
8174 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately: unlike stopStream(), always uses
// snd_pcm_drop(), discarding any queued frames instead of draining them.
8177 void RtApiAlsa :: abortStream()
8180 if ( stream_.state == STREAM_STOPPED ) {
8181 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8182 error( RtAudioError::WARNING );
8186 stream_.state = STREAM_STOPPED;
8187 MUTEX_LOCK( &stream_.mutex );
8190 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8191 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8192 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8193 result = snd_pcm_drop( handle[0] );
8195 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8196 errorText_ = errorStream_.str();
// Dropping the input side is skipped when handles are linked, since the
// linked playback drop already stops capture.
8201 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8202 result = snd_pcm_drop( handle[1] );
8204 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8205 errorText_ = errorStream_.str();
// Park the callback thread on its condition variable while stopped.
8211 apiInfo->runnable = false; // fixes high CPU usage when stopped
8212 MUTEX_UNLOCK( &stream_.mutex );
8214 if ( result >= 0 ) return;
8215 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then read from / write to the PCM device(s), handling
// xruns (-EPIPE) by re-preparing the device and flagging the condition for
// the next callback's status word.
8218 void RtApiAlsa :: callbackEvent()
8220 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While stopped, block on runnable_cv; startStream()/closeStream() signal it.
8221 if ( stream_.state == STREAM_STOPPED ) {
8222 MUTEX_LOCK( &stream_.mutex );
8223 while ( !apiInfo->runnable )
8224 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8226 if ( stream_.state != STREAM_RUNNING ) {
8227 MUTEX_UNLOCK( &stream_.mutex );
8230 MUTEX_UNLOCK( &stream_.mutex );
8233 if ( stream_.state == STREAM_CLOSED ) {
8234 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8235 error( RtAudioError::WARNING );
// Build the status word from any xrun recorded by a previous iteration,
// then call the user callback with the user-format buffers.
8239 int doStopStream = 0;
8240 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8241 double streamTime = getStreamTime();
8242 RtAudioStreamStatus status = 0;
8243 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8244 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8245 apiInfo->xrun[0] = false;
8247 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8248 status |= RTAUDIO_INPUT_OVERFLOW;
8249 apiInfo->xrun[1] = false;
8251 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8252 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Callback return value 2 requests an immediate abort; 1 (checked at the
// bottom) requests a graceful stop after this buffer.
8254 if ( doStopStream == 2 ) {
8259 MUTEX_LOCK( &stream_.mutex );
8261 // The state might change while waiting on a mutex.
8262 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8268 snd_pcm_sframes_t frames;
8269 RtAudioFormat format;
8270 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Input side: capture from handle[1] ----
8272 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8274 // Setup parameters.
// Read into the device buffer when a format/channel conversion to the
// user format is needed; otherwise read straight into the user buffer.
8275 if ( stream_.doConvertBuffer[1] ) {
8276 buffer = stream_.deviceBuffer;
8277 channels = stream_.nDeviceChannels[1];
8278 format = stream_.deviceFormat[1];
8281 buffer = stream_.userBuffer[1];
8282 channels = stream_.nUserChannels[1];
8283 format = stream_.userFormat;
8286 // Read samples from device in interleaved/non-interleaved format.
8287 if ( stream_.deviceInterleaved[1] )
8288 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build one pointer per channel into the flat buffer.
8290 void *bufs[channels];
8291 size_t offset = stream_.bufferSize * formatBytes( format );
8292 for ( int i=0; i<channels; i++ )
8293 bufs[i] = (void *) (buffer + (i * offset));
8294 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
8297 if ( result < (int) stream_.bufferSize ) {
8298 // Either an error or overrun occured.
// -EPIPE signals an xrun: record it and re-prepare the device so the
// stream can continue on the next iteration.
8299 if ( result == -EPIPE ) {
8300 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8301 if ( state == SND_PCM_STATE_XRUN ) {
8302 apiInfo->xrun[1] = true;
8303 result = snd_pcm_prepare( handle[1] );
8305 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8306 errorText_ = errorStream_.str();
8310 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8311 errorText_ = errorStream_.str();
8315 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8316 errorText_ = errorStream_.str();
8318 error( RtAudioError::WARNING );
8322 // Do byte swapping if necessary.
8323 if ( stream_.doByteSwap[1] )
8324 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8326 // Do buffer conversion if necessary.
8327 if ( stream_.doConvertBuffer[1] )
8328 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8330 // Check stream latency
8331 result = snd_pcm_delay( handle[1], &frames );
8332 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Output side: playback to handle[0] ----
8337 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8339 // Setup parameters and do buffer conversion if necessary.
8340 if ( stream_.doConvertBuffer[0] ) {
8341 buffer = stream_.deviceBuffer;
8342 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8343 channels = stream_.nDeviceChannels[0];
8344 format = stream_.deviceFormat[0];
8347 buffer = stream_.userBuffer[0];
8348 channels = stream_.nUserChannels[0];
8349 format = stream_.userFormat;
8352 // Do byte swapping if necessary.
8353 if ( stream_.doByteSwap[0] )
8354 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8356 // Write samples to device in interleaved/non-interleaved format.
8357 if ( stream_.deviceInterleaved[0] )
8358 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
// Non-interleaved: one pointer per channel, channel data laid out in
// consecutive bufferSize-frame segments.
8360 void *bufs[channels];
8361 size_t offset = stream_.bufferSize * formatBytes( format );
8362 for ( int i=0; i<channels; i++ )
8363 bufs[i] = (void *) (buffer + (i * offset));
8364 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8367 if ( result < (int) stream_.bufferSize ) {
8368 // Either an error or underrun occured.
8369 if ( result == -EPIPE ) {
8370 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8371 if ( state == SND_PCM_STATE_XRUN ) {
8372 apiInfo->xrun[0] = true;
8373 result = snd_pcm_prepare( handle[0] );
8375 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8376 errorText_ = errorStream_.str();
8379 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8382 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8383 errorText_ = errorStream_.str();
8387 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8388 errorText_ = errorStream_.str();
8390 error( RtAudioError::WARNING );
8394 // Check stream latency
8395 result = snd_pcm_delay( handle[0], &frames );
8396 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8400 MUTEX_UNLOCK( &stream_.mutex );
// Advance stream time and honor a graceful-stop request from the callback.
8402 RtApi::tickStreamTime();
8403 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops on
// RtApiAlsa::callbackEvent() until CallbackInfo::isRunning is cleared
// (by closeStream) or the thread is cancelled.
8406 static void *alsaCallbackHandler( void *ptr )
8408 CallbackInfo *info = (CallbackInfo *) ptr;
8409 RtApiAlsa *object = (RtApiAlsa *) info->object;
8410 bool *isRunning = &info->isRunning;
// Diagnostic only: report whether the requested SCHED_RR realtime
// scheduling actually took effect for this thread.
8412 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8413 if ( info->doRealtime ) {
8414 std::cerr << "RtAudio alsa: " <<
8415 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8416 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
8420 while ( *isRunning == true ) {
8421 pthread_testcancel();
8422 object->callbackEvent();
8425 pthread_exit( NULL );
8428 //******************** End of __LINUX_ALSA__ *********************//
8431 #if defined(__LINUX_PULSE__)
8433 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8434 // and Tristan Matthews.
8436 #include <pulse/error.h>
8437 #include <pulse/simple.h>
// Sample rates the PulseAudio backend advertises; the list is
// zero-terminated so callers can iterate until *sr == 0.
8440 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8441 44100, 48000, 96000, 0};
// Maps an RtAudio sample format to the equivalent PulseAudio format.
8443 struct rtaudio_pa_format_mapping_t {
8444 RtAudioFormat rtaudio_format;
8445 pa_sample_format_t pa_format;
// Formats passed through natively to PulseAudio; the table is terminated
// by the {0, PA_SAMPLE_INVALID} sentinel. Other formats are converted
// internally (see RtApiPulse::probeDeviceOpen).
8448 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8449 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8450 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8451 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8452 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend: the simple-API connections
// (s_play/s_rec), the callback thread, and the runnable flag/condition
// variable used to park the callback thread while the stream is stopped.
8454 struct PulseAudioHandle {
8458 pthread_cond_t runnable_cv;
8460 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensures an open stream is shut down before the object goes away.
8463 RtApiPulse::~RtApiPulse()
8465 if ( stream_.state != STREAM_CLOSED )
// The simple PulseAudio backend exposes a single logical device (the
// server's default sink/source) — see getDeviceInfo() below.
8469 unsigned int RtApiPulse::getDeviceCount( void )
// Report fixed capabilities for the single "PulseAudio" device: stereo
// in/out, default for both directions, and the backend's supported sample
// rates and native formats. The device index argument is ignored.
8474 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8476 RtAudio::DeviceInfo info;
8478 info.name = "PulseAudio";
8479 info.outputChannels = 2;
8480 info.inputChannels = 2;
8481 info.duplexChannels = 2;
8482 info.isDefaultOutput = true;
8483 info.isDefaultInput = true;
// Copy the zero-terminated SUPPORTED_SAMPLERATES table into the info struct.
8485 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8486 info.sampleRates.push_back( *sr );
8488 info.preferredSampleRate = 48000;
8489 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread: loops on
// RtApiPulse::callbackEvent() until CallbackInfo::isRunning is cleared.
8494 static void *pulseaudio_callback( void * user )
8496 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8497 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8498 volatile bool *isRunning = &cbi->isRunning;
// Diagnostic only: report whether SCHED_RR realtime scheduling took effect.
8500 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8501 if (cbi->doRealtime) {
8502 std::cerr << "RtAudio pulse: " <<
8503 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8504 "running realtime scheduling" << std::endl;
// pthread_testcancel() provides a cancellation point each iteration.
8508 while ( *isRunning ) {
8509 pthread_testcancel();
8510 context->callbackEvent();
8513 pthread_exit( NULL );
// Shut down the PulseAudio stream: stop and join the callback thread,
// free the simple-API connections, and release internal buffers.
8516 void RtApiPulse::closeStream( void )
8518 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8520 stream_.callbackInfo.isRunning = false;
// If the callback thread is parked (stream stopped), wake it so it can
// observe isRunning == false and exit; then join it.
8522 MUTEX_LOCK( &stream_.mutex );
8523 if ( stream_.state == STREAM_STOPPED ) {
8524 pah->runnable = true;
8525 pthread_cond_signal( &pah->runnable_cv );
8527 MUTEX_UNLOCK( &stream_.mutex );
8529 pthread_join( pah->thread, 0 );
// Flush any queued playback audio before freeing the connection.
8530 if ( pah->s_play ) {
8531 pa_simple_flush( pah->s_play, NULL );
8532 pa_simple_free( pah->s_play );
8535 pa_simple_free( pah->s_rec );
8537 pthread_cond_destroy( &pah->runnable_cv );
8539 stream_.apiHandle = 0;
// Free the user-side buffers for both directions.
8542 if ( stream_.userBuffer[0] ) {
8543 free( stream_.userBuffer[0] );
8544 stream_.userBuffer[0] = 0;
8546 if ( stream_.userBuffer[1] ) {
8547 free( stream_.userBuffer[1] );
8548 stream_.userBuffer[1] = 0;
8551 stream_.state = STREAM_CLOSED;
8552 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped,
// invoke the user callback, then write playback data / read capture data
// through the pa_simple connections, converting formats as needed.
8555 void RtApiPulse::callbackEvent( void )
8557 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// While stopped, block on runnable_cv; startStream()/closeStream() signal it.
8559 if ( stream_.state == STREAM_STOPPED ) {
8560 MUTEX_LOCK( &stream_.mutex );
8561 while ( !pah->runnable )
8562 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8564 if ( stream_.state != STREAM_RUNNING ) {
8565 MUTEX_UNLOCK( &stream_.mutex );
8568 MUTEX_UNLOCK( &stream_.mutex );
8571 if ( stream_.state == STREAM_CLOSED ) {
8572 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8573 "this shouldn't happen!";
8574 error( RtAudioError::WARNING );
// Invoke the user callback with the user-format buffers; its return value
// may request an abort (2) or a graceful stop (1, handled at the end).
8578 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8579 double streamTime = getStreamTime();
8580 RtAudioStreamStatus status = 0;
8581 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8582 stream_.bufferSize, streamTime, status,
8583 stream_.callbackInfo.userData );
8585 if ( doStopStream == 2 ) {
8590 MUTEX_LOCK( &stream_.mutex );
// Pulse exchanges data in the device format: use the conversion buffer
// when a user<->device conversion is configured, else the user buffer.
8591 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8592 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8594 if ( stream_.state != STREAM_RUNNING )
// ---- Output side: convert (if needed) then blocking write ----
8599 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8600 if ( stream_.doConvertBuffer[OUTPUT] ) {
8601 convertBuffer( stream_.deviceBuffer,
8602 stream_.userBuffer[OUTPUT],
8603 stream_.convertInfo[OUTPUT] );
8604 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8605 formatBytes( stream_.deviceFormat[OUTPUT] );
8607 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8608 formatBytes( stream_.userFormat );
8610 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8611 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8612 pa_strerror( pa_error ) << ".";
8613 errorText_ = errorStream_.str();
8614 error( RtAudioError::WARNING );
// ---- Input side: blocking read then convert (if needed) ----
8618 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8619 if ( stream_.doConvertBuffer[INPUT] )
8620 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8621 formatBytes( stream_.deviceFormat[INPUT] );
8623 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8624 formatBytes( stream_.userFormat );
8626 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8627 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8628 pa_strerror( pa_error ) << ".";
8629 errorText_ = errorStream_.str();
8630 error( RtAudioError::WARNING );
8632 if ( stream_.doConvertBuffer[INPUT] ) {
8633 convertBuffer( stream_.userBuffer[INPUT],
8634 stream_.deviceBuffer,
8635 stream_.convertInfo[INPUT] );
8640 MUTEX_UNLOCK( &stream_.mutex );
8641 RtApi::tickStreamTime();
// Track output latency, converting microseconds to frames.
8645 pa_usec_t const lat = pa_simple_get_latency(pah->s_play, &e);
8647 stream_.latency[0] = lat * stream_.sampleRate / 1000000;
8651 if ( doStopStream == 1 )
// Start a stopped PulseAudio stream by marking it running and waking the
// callback thread parked on the runnable condition variable.
8655 void RtApiPulse::startStream( void )
8657 RtApi::startStream();
8658 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8660 if ( stream_.state == STREAM_CLOSED ) {
8661 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8662 error( RtAudioError::INVALID_USE );
8665 if ( stream_.state == STREAM_RUNNING ) {
8666 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8667 error( RtAudioError::WARNING );
8671 MUTEX_LOCK( &stream_.mutex );
// Record the start time so stream-time accounting has a reference point.
8673 #if defined( HAVE_GETTIMEOFDAY )
8674 gettimeofday( &stream_.lastTickTimestamp, NULL );
8677 stream_.state = STREAM_RUNNING;
8679 pah->runnable = true;
8680 pthread_cond_signal( &pah->runnable_cv );
8681 MUTEX_UNLOCK( &stream_.mutex );
// Stop the stream gracefully: park the callback thread and drain any
// queued playback audio so it plays out before the stream goes quiet.
8684 void RtApiPulse::stopStream( void )
8686 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8688 if ( stream_.state == STREAM_CLOSED ) {
8689 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8690 error( RtAudioError::INVALID_USE );
8693 if ( stream_.state == STREAM_STOPPED ) {
8694 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8695 error( RtAudioError::WARNING );
// Clear runnable before taking the mutex so the callback thread parks.
8699 stream_.state = STREAM_STOPPED;
8700 pah->runnable = false;
8701 MUTEX_LOCK( &stream_.mutex );
// Drain blocks until all queued playback samples have been played.
8703 if ( pah && pah->s_play ) {
8705 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8706 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8707 pa_strerror( pa_error ) << ".";
8708 errorText_ = errorStream_.str();
8709 MUTEX_UNLOCK( &stream_.mutex );
8710 error( RtAudioError::SYSTEM_ERROR );
8715 stream_.state = STREAM_STOPPED;
8716 MUTEX_UNLOCK( &stream_.mutex );
// Stop the stream immediately: unlike stopStream(), flushes (discards)
// queued playback audio instead of draining it.
8719 void RtApiPulse::abortStream( void )
8721 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8723 if ( stream_.state == STREAM_CLOSED ) {
8724 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8725 error( RtAudioError::INVALID_USE );
8728 if ( stream_.state == STREAM_STOPPED ) {
8729 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8730 error( RtAudioError::WARNING );
// Clear runnable before taking the mutex so the callback thread parks.
8734 stream_.state = STREAM_STOPPED;
8735 pah->runnable = false;
8736 MUTEX_LOCK( &stream_.mutex );
// Flush discards any queued playback samples without playing them.
8738 if ( pah && pah->s_play ) {
8740 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8741 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8742 pa_strerror( pa_error ) << ".";
8743 errorText_ = errorStream_.str();
8744 MUTEX_UNLOCK( &stream_.mutex );
8745 error( RtAudioError::SYSTEM_ERROR );
8750 stream_.state = STREAM_STOPPED;
8751 MUTEX_UNLOCK( &stream_.mutex );
8754 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8755 unsigned int channels, unsigned int firstChannel,
8756 unsigned int sampleRate, RtAudioFormat format,
8757 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8759 PulseAudioHandle *pah = 0;
8760 unsigned long bufferBytes = 0;
8763 if ( device != 0 ) return false;
8764 if ( mode != INPUT && mode != OUTPUT ) return false;
8765 if ( channels != 1 && channels != 2 ) {
8766 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8769 ss.channels = channels;
8771 if ( firstChannel != 0 ) return false;
8773 bool sr_found = false;
8774 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8775 if ( sampleRate == *sr ) {
8777 stream_.sampleRate = sampleRate;
8778 ss.rate = sampleRate;
8783 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8788 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8789 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8790 if ( format == sf->rtaudio_format ) {
8792 stream_.userFormat = sf->rtaudio_format;
8793 stream_.deviceFormat[mode] = stream_.userFormat;
8794 ss.format = sf->pa_format;
8798 if ( !sf_found ) { // Use internal data format conversion.
8799 stream_.userFormat = format;
8800 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8801 ss.format = PA_SAMPLE_FLOAT32LE;
8804 // Set other stream parameters.
8805 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8806 else stream_.userInterleaved = true;
8807 stream_.deviceInterleaved[mode] = true;
8808 stream_.nBuffers = 1;
8809 stream_.doByteSwap[mode] = false;
8810 stream_.nUserChannels[mode] = channels;
8811 stream_.nDeviceChannels[mode] = channels + firstChannel;
8812 stream_.channelOffset[mode] = 0;
8813 std::string streamName = "RtAudio";
8815 // Set flags for buffer conversion.
8816 stream_.doConvertBuffer[mode] = false;
8817 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8818 stream_.doConvertBuffer[mode] = true;
8819 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8820 stream_.doConvertBuffer[mode] = true;
8822 // Allocate necessary internal buffers.
8823 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8824 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8825 if ( stream_.userBuffer[mode] == NULL ) {
8826 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8829 stream_.bufferSize = *bufferSize;
8831 if ( stream_.doConvertBuffer[mode] ) {
8833 bool makeBuffer = true;
8834 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8835 if ( mode == INPUT ) {
8836 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8837 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8838 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8843 bufferBytes *= *bufferSize;
8844 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8845 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8846 if ( stream_.deviceBuffer == NULL ) {
8847 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8853 stream_.device[mode] = device;
8855 // Setup the buffer conversion information structure.
8856 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8858 if ( !stream_.apiHandle ) {
8859 PulseAudioHandle *pah = new PulseAudioHandle;
8861 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8865 stream_.apiHandle = pah;
8866 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8867 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8871 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8874 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8877 pa_buffer_attr buffer_attr;
8878 buffer_attr.fragsize = bufferBytes;
8879 buffer_attr.maxlength = -1;
8881 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8882 if ( !pah->s_rec ) {
8883 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8888 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8889 if ( !pah->s_play ) {
8890 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8898 if ( stream_.mode == UNINITIALIZED )
8899 stream_.mode = mode;
8900 else if ( stream_.mode == mode )
8903 stream_.mode = DUPLEX;
8905 if ( !stream_.callbackInfo.isRunning ) {
8906 stream_.callbackInfo.object = this;
8908 stream_.state = STREAM_STOPPED;
8909 // Set the thread attributes for joinable and realtime scheduling
8910 // priority (optional). The higher priority will only take affect
8911 // if the program is run as root or suid. Note, under Linux
8912 // processes with CAP_SYS_NICE privilege, a user can change
8913 // scheduling policy and priority (thus need not be root). See
8914 // POSIX "capabilities".
8915 pthread_attr_t attr;
8916 pthread_attr_init( &attr );
8917 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8918 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
8919 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8920 stream_.callbackInfo.doRealtime = true;
8921 struct sched_param param;
8922 int priority = options->priority;
8923 int min = sched_get_priority_min( SCHED_RR );
8924 int max = sched_get_priority_max( SCHED_RR );
8925 if ( priority < min ) priority = min;
8926 else if ( priority > max ) priority = max;
8927 param.sched_priority = priority;
8929 // Set the policy BEFORE the priority. Otherwise it fails.
8930 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8931 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8932 // This is definitely required. Otherwise it fails.
8933 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8934 pthread_attr_setschedparam(&attr, ¶m);
8937 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8939 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8942 stream_.callbackInfo.isRunning = true;
8943 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8944 pthread_attr_destroy(&attr);
8946 // Failed. Try instead with default attributes.
8947 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8949 stream_.callbackInfo.isRunning = false;
8950 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8959 if ( pah && stream_.callbackInfo.isRunning ) {
8960 pthread_cond_destroy( &pah->runnable_cv );
8962 stream_.apiHandle = 0;
8965 for ( int i=0; i<2; i++ ) {
8966 if ( stream_.userBuffer[i] ) {
8967 free( stream_.userBuffer[i] );
8968 stream_.userBuffer[i] = 0;
8972 if ( stream_.deviceBuffer ) {
8973 free( stream_.deviceBuffer );
8974 stream_.deviceBuffer = 0;
8977 stream_.state = STREAM_CLOSED;
8981 //******************** End of __LINUX_PULSE__ *********************//
8984 #if defined(__LINUX_OSS__)
8987 #include <sys/ioctl.h>
8990 #include <sys/soundcard.h>
// Forward declaration of the OSS callback-thread entry point (defined
// near the end of this section); it loops calling RtApiOss::callbackEvent().
8994 static void *ossCallbackHandler(void * ptr);

8996 // A structure to hold various information related to the OSS API
// NOTE(review): this excerpt is a partial paste -- the struct header
// ("struct OssHandle {") and the xrun[2]/triggered member declarations
// are elided here; the fields below belong to that OssHandle struct.
// File descriptors for the playback ([0]) and capture ([1]) devices.
8999   int id[2];    // device ids
// Condition variable used to park the callback thread while the stream
// is stopped; signaled from startStream()/closeStream().
9002   pthread_cond_t runnable;
// Constructor initializer fragment: duplex trigger not yet fired,
// both device fds cleared, both xrun flags cleared.
9005   :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
9008 RtApiOss :: RtApiOss()
9010 // Nothing to do here.
9013 RtApiOss :: ~RtApiOss()
9015 if ( stream_.state != STREAM_CLOSED ) closeStream();
9018 unsigned int RtApiOss :: getDeviceCount( void )
9020 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9021 if ( mixerfd == -1 ) {
9022 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
9023 error( RtAudioError::WARNING );
9027 oss_sysinfo sysinfo;
9028 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
9030 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
9031 error( RtAudioError::WARNING );
9036 return sysinfo.numaudios;
// Probe OSS device index 'device' and fill an RtAudio::DeviceInfo with
// its channel counts, native data formats and supported sample rates,
// using the OSS v4 SNDCTL_SYSINFO / SNDCTL_AUDIOINFO mixer ioctls.
// NOTE(review): this excerpt is a partial paste -- braces, early
// returns, close(mixerfd) calls and info.probed = true are elided; the
// statements below are the surviving lines, kept verbatim.
9039 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
9041   RtAudio::DeviceInfo info;
9042   info.probed = false;
9044   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9045   if ( mixerfd == -1 ) {
9046     errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
9047     error( RtAudioError::WARNING );
9051   oss_sysinfo sysinfo;
9052   int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9053   if ( result == -1 ) {
9055     errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
9056     error( RtAudioError::WARNING );
9060   unsigned nDevices = sysinfo.numaudios;
9061   if ( nDevices == 0 ) {
9063     errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
9064     error( RtAudioError::INVALID_USE );
9068   if ( device >= nDevices ) {
9070     errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
9071     error( RtAudioError::INVALID_USE );
// Ask the mixer for this device's capabilities (name, caps, formats,
// channel counts, rate list). NOTE(review): the "ainfo.dev = device;"
// assignment appears to be among the elided lines -- confirm against
// the canonical source.
9075   oss_audioinfo ainfo;
9077   result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9079   if ( result == -1 ) {
9080     errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9081     errorText_ = errorStream_.str();
9082     error( RtAudioError::WARNING );
// Channel counts: duplex channel count is the min of in/out counts.
9087   if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
9088   if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
9089   if ( ainfo.caps & PCM_CAP_DUPLEX ) {
9090     if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
9091       info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Map the OSS input-format bitmask onto RtAudio's native format flags.
9094   // Probe data formats ... do for input
9095   unsigned long mask = ainfo.iformats;
9096   if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
9097     info.nativeFormats |= RTAUDIO_SINT16;
9098   if ( mask & AFMT_S8 )
9099     info.nativeFormats |= RTAUDIO_SINT8;
9100   if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
9101     info.nativeFormats |= RTAUDIO_SINT32;
9103   if ( mask & AFMT_FLOAT )
9104     info.nativeFormats |= RTAUDIO_FLOAT32;
9106   if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
9107     info.nativeFormats |= RTAUDIO_SINT24;
9109   // Check that we have at least one supported format
9110   if ( info.nativeFormats == 0 ) {
9111     errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
9112     errorText_ = errorStream_.str();
9113     error( RtAudioError::WARNING );
// Sample rates: if the device publishes an explicit rate list, keep the
// intersection with RtAudio's SAMPLE_RATES table; otherwise fall back
// to the device's [min_rate, max_rate] range. Preferred rate is the
// highest supported rate not exceeding 48000 Hz.
9117   // Probe the supported sample rates.
9118   info.sampleRates.clear();
9119   if ( ainfo.nrates ) {
9120     for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
9121       for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9122         if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
9123           info.sampleRates.push_back( SAMPLE_RATES[k] );
9125           if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9126             info.preferredSampleRate = SAMPLE_RATES[k];
9134     // Check min and max rate values;
9135     for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
9136       if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
9137         info.sampleRates.push_back( SAMPLE_RATES[k] );
9139         if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
9140           info.preferredSampleRate = SAMPLE_RATES[k];
9145   if ( info.sampleRates.size() == 0 ) {
9146     errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
9147     errorText_ = errorStream_.str();
9148     error( RtAudioError::WARNING );
9152   info.name = ainfo.name;
// Open OSS device 'device' for the given direction (mode), channel
// count/offset, sample rate, data format and buffer size, allocating
// the user/device conversion buffers, the OssHandle and (on the first
// open) the callback thread. On any failure it falls through to the
// cleanup section at the bottom (labelled 'error' in the original) that
// tears down the handle, buffers and state.
// NOTE(review): this excerpt is a partial paste -- braces, "return
// FAILURE;" lines, close(fd/mixerfd) calls, goto targets and some
// declarations (flags, fd, mask, buffers) are elided; the statements
// below are kept verbatim.
9159 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9160                                   unsigned int firstChannel, unsigned int sampleRate,
9161                                   RtAudioFormat format, unsigned int *bufferSize,
9162                                   RtAudio::StreamOptions *options )
9164   int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9165   if ( mixerfd == -1 ) {
9166     errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9170   oss_sysinfo sysinfo;
9171   int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9172   if ( result == -1 ) {
9174     errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9178   unsigned nDevices = sysinfo.numaudios;
9179   if ( nDevices == 0 ) {
9180     // This should not happen because a check is made before this function is called.
9182     errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9186   if ( device >= nDevices ) {
9187     // This should not happen because a check is made before this function is called.
9189     errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9193   oss_audioinfo ainfo;
9195   result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9197   if ( result == -1 ) {
// NOTE(review): message says "getDeviceInfo" but we are in
// probeDeviceOpen -- looks like a copy/paste slip in the original.
9198     errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9199     errorText_ = errorStream_.str();
9203   // Check if device supports input or output
9204   if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9205        ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9206     if ( mode == OUTPUT )
9207       errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9209       errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9210     errorText_ = errorStream_.str();
// Decide the open() flags. OSS requires closing and reopening the same
// device O_RDWR when it is used for both playback and capture (duplex).
9215   OssHandle *handle = (OssHandle *) stream_.apiHandle;
9216   if ( mode == OUTPUT )
9218   else { // mode == INPUT
9219     if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9220       // We just set the same device for playback ... close and reopen for duplex (OSS only).
9221       close( handle->id[0] );
9223       if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9224         errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9225         errorText_ = errorStream_.str();
9228       // Check that the number previously set channels is the same.
9229       if ( stream_.nUserChannels[0] != channels ) {
9230         errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9231         errorText_ = errorStream_.str();
9240   // Set exclusive access if specified.
9241   if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9243   // Try to open the device.
9245   fd = open( ainfo.devnode, flags, 0 );
9247     if ( errno == EBUSY )
9248       errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9250       errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9251     errorText_ = errorStream_.str();
9255   // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): "flags | O_RDWR" is bitwise-OR, always non-zero, so
// this branch is unconditionally taken; "flags & O_RDWR" was likely
// intended. Present in the canonical source as well -- flagging only.
9257   if ( flags | O_RDWR ) {
9258     result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9259     if ( result == -1) {
9260       errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9261       errorText_ = errorStream_.str();
9267   // Check the device channel support.
9268   stream_.nUserChannels[mode] = channels;
9269   if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9271     errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9272     errorText_ = errorStream_.str();
9276   // Set the number of channels.
9277   int deviceChannels = channels + firstChannel;
9278   result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9279   if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9281     errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9282     errorText_ = errorStream_.str();
9285   stream_.nDeviceChannels[mode] = deviceChannels;
9287   // Get the data format mask
9289   result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9290   if ( result == -1 ) {
9292     errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9293     errorText_ = errorStream_.str();
// Pick the OSS AFMT_* constant matching the requested RtAudio format;
// *_NE is native-endian, *_OE opposite-endian (requires byte swapping).
9297   // Determine how to set the device format.
9298   stream_.userFormat = format;
9299   int deviceFormat = -1;
9300   stream_.doByteSwap[mode] = false;
9301   if ( format == RTAUDIO_SINT8 ) {
9302     if ( mask & AFMT_S8 ) {
9303       deviceFormat = AFMT_S8;
9304       stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9307   else if ( format == RTAUDIO_SINT16 ) {
9308     if ( mask & AFMT_S16_NE ) {
9309       deviceFormat = AFMT_S16_NE;
9310       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9312     else if ( mask & AFMT_S16_OE ) {
9313       deviceFormat = AFMT_S16_OE;
9314       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9315       stream_.doByteSwap[mode] = true;
9318   else if ( format == RTAUDIO_SINT24 ) {
9319     if ( mask & AFMT_S24_NE ) {
9320       deviceFormat = AFMT_S24_NE;
9321       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9323     else if ( mask & AFMT_S24_OE ) {
9324       deviceFormat = AFMT_S24_OE;
9325       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9326       stream_.doByteSwap[mode] = true;
9329   else if ( format == RTAUDIO_SINT32 ) {
9330     if ( mask & AFMT_S32_NE ) {
9331       deviceFormat = AFMT_S32_NE;
9332       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9334     else if ( mask & AFMT_S32_OE ) {
9335       deviceFormat = AFMT_S32_OE;
9336       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9337       stream_.doByteSwap[mode] = true;
// Requested format unavailable: fall back to the "best" native format
// the device does support (conversion to the user format happens later).
9341   if ( deviceFormat == -1 ) {
9342     // The user requested format is not natively supported by the device.
9343     if ( mask & AFMT_S16_NE ) {
9344       deviceFormat = AFMT_S16_NE;
9345       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9347     else if ( mask & AFMT_S32_NE ) {
9348       deviceFormat = AFMT_S32_NE;
9349       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9351     else if ( mask & AFMT_S24_NE ) {
9352       deviceFormat = AFMT_S24_NE;
9353       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9355     else if ( mask & AFMT_S16_OE ) {
9356       deviceFormat = AFMT_S16_OE;
9357       stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9358       stream_.doByteSwap[mode] = true;
9360     else if ( mask & AFMT_S32_OE ) {
9361       deviceFormat = AFMT_S32_OE;
9362       stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9363       stream_.doByteSwap[mode] = true;
9365     else if ( mask & AFMT_S24_OE ) {
9366       deviceFormat = AFMT_S24_OE;
9367       stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9368       stream_.doByteSwap[mode] = true;
9370     else if ( mask & AFMT_S8) {
9371       deviceFormat = AFMT_S8;
9372       stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9376   if ( stream_.deviceFormat[mode] == 0 ) {
9377     // This really shouldn't happen ...
9379     errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9380     errorText_ = errorStream_.str();
9384   // Set the data format.
9385   int temp = deviceFormat;
9386   result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9387   if ( result == -1 || deviceFormat != temp ) {
9389     errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9390     errorText_ = errorStream_.str();
9394   // Attempt to set the buffer size.  According to OSS, the minimum
9395   // number of buffers is two.  The supposed minimum buffer size is 16
9396   // bytes, so that will be our lower bound.  The argument to this
9397   // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9398   // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9399   // We'll check the actual value used near the end of the setup
9401   int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9402   if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9404   if ( options ) buffers = options->numberOfBuffers;
9405   if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9406   if ( buffers < 2 ) buffers = 3;
// Encode fragment count/size as 0xMMMMSSSS; log10(x)/log10(2) = log2(x).
9407   temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9408   result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9409   if ( result == -1 ) {
9411     errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9412     errorText_ = errorStream_.str();
9415   stream_.nBuffers = buffers;
9417   // Save buffer size (in sample frames).
9418   *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9419   stream_.bufferSize = *bufferSize;
9421   // Set the sample rate.
9422   int srate = sampleRate;
9423   result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9424   if ( result == -1 ) {
9426     errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9427     errorText_ = errorStream_.str();
// Allow up to 100 Hz of slack between requested and granted rates.
9431   // Verify the sample rate setup worked.
9432   if ( abs( srate - (int)sampleRate ) > 100 ) {
9434     errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9435     errorText_ = errorStream_.str();
9438   stream_.sampleRate = sampleRate;
9440   if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9441     // We're doing duplex setup here.
9442     stream_.deviceFormat[0] = stream_.deviceFormat[1];
9443     stream_.nDeviceChannels[0] = deviceChannels;
9446   // Set interleaving parameters.
9447   stream_.userInterleaved = true;
9448   stream_.deviceInterleaved[mode] =  true;
9449   if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9450     stream_.userInterleaved = false;
9452   // Set flags for buffer conversion
9453   stream_.doConvertBuffer[mode] = false;
9454   if ( stream_.userFormat != stream_.deviceFormat[mode] )
9455     stream_.doConvertBuffer[mode] = true;
9456   if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9457     stream_.doConvertBuffer[mode] = true;
9458   if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9459        stream_.nUserChannels[mode] > 1 )
9460     stream_.doConvertBuffer[mode] = true;
9462   // Allocate the stream handles if necessary and then save.
9463   if ( stream_.apiHandle == 0 ) {
9465       handle = new OssHandle;
9467     catch ( std::bad_alloc& ) {
9468       errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9472     if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9473       errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9477     stream_.apiHandle = (void *) handle;
9480     handle = (OssHandle *) stream_.apiHandle;
9482   handle->id[mode] = fd;
// User-side buffer: sized for the caller's channel count and format.
9484   // Allocate necessary internal buffers.
9485   unsigned long bufferBytes;
9486   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9487   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9488   if ( stream_.userBuffer[mode] == NULL ) {
9489     errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// Device-side conversion buffer: reuse the output-mode buffer when
// opening input for duplex if it is already large enough.
9493   if ( stream_.doConvertBuffer[mode] ) {
9495     bool makeBuffer = true;
9496     bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9497     if ( mode == INPUT ) {
9498       if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9499         unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9500         if ( bufferBytes <= bytesOut ) makeBuffer = false;
9505       bufferBytes *= *bufferSize;
9506       if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9507       stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9508       if ( stream_.deviceBuffer == NULL ) {
9509         errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9515   stream_.device[mode] = device;
9516   stream_.state = STREAM_STOPPED;
9518   // Setup the buffer conversion information structure.
9519   if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9521   // Setup thread if necessary.
9522   if ( stream_.mode == OUTPUT && mode == INPUT ) {
9523     // We had already set up an output stream.
9524     stream_.mode = DUPLEX;
9525     if ( stream_.device[0] == device ) handle->id[0] = fd;
9528     stream_.mode = mode;
9530     // Setup callback thread.
9531     stream_.callbackInfo.object = (void *) this;
9533     // Set the thread attributes for joinable and realtime scheduling
9534     // priority.  The higher priority will only take affect if the
9535     // program is run as root or suid.
9536     pthread_attr_t attr;
9537     pthread_attr_init( &attr );
9538     pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9539 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9540     if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9541       stream_.callbackInfo.doRealtime = true;
9542       struct sched_param param;
9543       int priority = options->priority;
9544       int min = sched_get_priority_min( SCHED_RR );
9545       int max = sched_get_priority_max( SCHED_RR );
9546       if ( priority < min ) priority = min;
9547       else if ( priority > max ) priority = max;
9548       param.sched_priority = priority;
9550       // Set the policy BEFORE the priority. Otherwise it fails.
9551       pthread_attr_setschedpolicy(&attr, SCHED_RR);
9552       pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9553       // This is definitely required. Otherwise it fails.
9554       pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below is a mangled "&param" (HTML-entity
// decoding artifact in this paste) -- restore before compiling.
9555       pthread_attr_setschedparam(&attr, ¶m);
9558       pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9560     pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9563     stream_.callbackInfo.isRunning = true;
9564     result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9565     pthread_attr_destroy( &attr );
9567       // Failed. Try instead with default attributes.
9568       result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9570         stream_.callbackInfo.isRunning = false;
9571         errorText_ = "RtApiOss::error creating callback thread!";
// Error-path cleanup: destroy the handle, close fds, free buffers and
// reset the stream state. (The 'error:' label line is elided here.)
9581     pthread_cond_destroy( &handle->runnable );
9582     if ( handle->id[0] ) close( handle->id[0] );
9583     if ( handle->id[1] ) close( handle->id[1] );
9585     stream_.apiHandle = 0;
9588   for ( int i=0; i<2; i++ ) {
9589     if ( stream_.userBuffer[i] ) {
9590       free( stream_.userBuffer[i] );
9591       stream_.userBuffer[i] = 0;
9595   if ( stream_.deviceBuffer ) {
9596     free( stream_.deviceBuffer );
9597     stream_.deviceBuffer = 0;
9600   stream_.state = STREAM_CLOSED;
// Close the open stream: stop the callback thread (waking it if it is
// parked on the condition variable), halt the devices if still running,
// close the device fds, free the handle and all buffers, and reset the
// stream state to CLOSED.
// NOTE(review): partial paste -- braces and early returns are elided;
// statements below are kept verbatim.
9604 void RtApiOss :: closeStream()
9606   if ( stream_.state == STREAM_CLOSED ) {
9607     errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9608     error( RtAudioError::WARNING );
// Signal the callback thread to exit its loop, wake it if it is waiting
// on 'runnable', then join it before tearing anything down.
9612   OssHandle *handle = (OssHandle *) stream_.apiHandle;
9613   stream_.callbackInfo.isRunning = false;
9614   MUTEX_LOCK( &stream_.mutex );
9615   if ( stream_.state == STREAM_STOPPED )
9616     pthread_cond_signal( &handle->runnable );
9617   MUTEX_UNLOCK( &stream_.mutex );
9618   pthread_join( stream_.callbackInfo.thread, NULL );
// Halt any in-progress playback/capture before closing the fds.
9620   if ( stream_.state == STREAM_RUNNING ) {
9621     if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9622       ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9624       ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9625     stream_.state = STREAM_STOPPED;
9629     pthread_cond_destroy( &handle->runnable );
9630     if ( handle->id[0] ) close( handle->id[0] );
9631     if ( handle->id[1] ) close( handle->id[1] );
9633     stream_.apiHandle = 0;
9636   for ( int i=0; i<2; i++ ) {
9637     if ( stream_.userBuffer[i] ) {
9638       free( stream_.userBuffer[i] );
9639       stream_.userBuffer[i] = 0;
9643   if ( stream_.deviceBuffer ) {
9644     free( stream_.deviceBuffer );
9645     stream_.deviceBuffer = 0;
9648   stream_.mode = UNINITIALIZED;
9649   stream_.state = STREAM_CLOSED;
// Start the (stopped) stream: mark it RUNNING, record the start
// timestamp for stream-time accounting, and wake the callback thread
// parked on the handle's condition variable. OSS itself starts as soon
// as samples are written/read, so no device ioctl is needed here.
// NOTE(review): partial paste -- braces, a 'return', and the #endif for
// HAVE_GETTIMEOFDAY are elided; statements below are kept verbatim.
9652 void RtApiOss :: startStream()
9655   RtApi::startStream();
9656   if ( stream_.state == STREAM_RUNNING ) {
9657     errorText_ = "RtApiOss::startStream(): the stream is already running!";
9658     error( RtAudioError::WARNING );
9662   MUTEX_LOCK( &stream_.mutex );
// Reset the tick timestamp so getStreamTime() measures from this start.
9664   #if defined( HAVE_GETTIMEOFDAY )
9665   gettimeofday( &stream_.lastTickTimestamp, NULL );
9668   stream_.state = STREAM_RUNNING;
9670   // No need to do anything else here ... OSS automatically starts
9671   // when fed samples.
9673   MUTEX_UNLOCK( &stream_.mutex );
// Wake the callback thread, which waits on 'runnable' while stopped.
9675   OssHandle *handle = (OssHandle *) stream_.apiHandle;
9676   pthread_cond_signal( &handle->runnable );
// Gracefully stop the stream: flush the output with a few buffers of
// zeros (so queued audio drains silently), halt the device(s) with
// SNDCTL_DSP_HALT, and mark the stream STOPPED. Raises SYSTEM_ERROR if
// a halt ioctl failed.
// NOTE(review): partial paste -- braces, 'return' statements and some
// declarations (result, samples, buffer) are elided; statements below
// are kept verbatim.
9679 void RtApiOss :: stopStream()
9682   if ( stream_.state == STREAM_STOPPED ) {
9683     errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9684     error( RtAudioError::WARNING );
9688   MUTEX_LOCK( &stream_.mutex );
9690   // The state might change while waiting on a mutex.
9691   if ( stream_.state == STREAM_STOPPED ) {
9692     MUTEX_UNLOCK( &stream_.mutex );
9697   OssHandle *handle = (OssHandle *) stream_.apiHandle;
9698   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9700     // Flush the output with zeros a few times.
// Pick whichever buffer is actually fed to the device (converted
// device buffer vs. raw user buffer).
9703     RtAudioFormat format;
9705     if ( stream_.doConvertBuffer[0] ) {
9706       buffer = stream_.deviceBuffer;
9707       samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9708       format = stream_.deviceFormat[0];
9711       buffer = stream_.userBuffer[0];
9712       samples = stream_.bufferSize * stream_.nUserChannels[0];
9713       format = stream_.userFormat;
// Write nBuffers+1 buffers of silence so pending fragments drain.
9716     memset( buffer, 0, samples * formatBytes(format) );
9717     for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9718       result = write( handle->id[0], buffer, samples * formatBytes(format) );
9719       if ( result == -1 ) {
9720         errorText_ = "RtApiOss::stopStream: audio write error.";
9721         error( RtAudioError::WARNING );
9725     result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9726     if ( result == -1 ) {
9727       errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9728       errorText_ = errorStream_.str();
9731     handle->triggered = false;
// Halt the capture side separately unless it shares the playback fd.
9734   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9735     result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9736     if ( result == -1 ) {
9737       errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9738       errorText_ = errorStream_.str();
9744   stream_.state = STREAM_STOPPED;
9745   MUTEX_UNLOCK( &stream_.mutex );
9747   if ( result != -1 ) return;
9748   error( RtAudioError::SYSTEM_ERROR );
// Immediately stop the stream without draining queued output: halt the
// playback and/or capture device(s) with SNDCTL_DSP_HALT and mark the
// stream STOPPED. Identical to stopStream() except no zero-flush.
// NOTE(review): partial paste -- braces, 'return' statements and the
// 'result' declaration are elided; statements below are kept verbatim.
9751 void RtApiOss :: abortStream()
9754   if ( stream_.state == STREAM_STOPPED ) {
9755     errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9756     error( RtAudioError::WARNING );
9760   MUTEX_LOCK( &stream_.mutex );
9762   // The state might change while waiting on a mutex.
9763   if ( stream_.state == STREAM_STOPPED ) {
9764     MUTEX_UNLOCK( &stream_.mutex );
9769   OssHandle *handle = (OssHandle *) stream_.apiHandle;
9770   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9771     result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9772     if ( result == -1 ) {
9773       errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9774       errorText_ = errorStream_.str();
// Force re-trigger on the next duplex start.
9777     handle->triggered = false;
// Halt the capture side separately unless it shares the playback fd.
9780   if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9781     result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9782     if ( result == -1 ) {
9783       errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9784       errorText_ = errorStream_.str();
9790   stream_.state = STREAM_STOPPED;
9791   MUTEX_UNLOCK( &stream_.mutex );
9793   if ( result != -1 ) return;
9794   error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread: wait while stopped, invoke the
// user callback (reporting any xrun flags), then write the output
// buffer to / read the input buffer from the OSS device(s), performing
// format conversion and byte swapping as configured, and finally tick
// the stream time. A callback return of 2 aborts, 1 stops after this
// cycle.
// NOTE(review): partial paste -- braces, 'return's, the 'unlock:'
// label and some declarations (buffer, samples, result, trig) are
// elided; statements below are kept verbatim.
9797 void RtApiOss :: callbackEvent()
9799   OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Park here while the stream is stopped; startStream()/closeStream()
// signal 'runnable' to wake us.
9800   if ( stream_.state == STREAM_STOPPED ) {
9801     MUTEX_LOCK( &stream_.mutex );
9802     pthread_cond_wait( &handle->runnable, &stream_.mutex );
9803     if ( stream_.state != STREAM_RUNNING ) {
9804       MUTEX_UNLOCK( &stream_.mutex );
9807     MUTEX_UNLOCK( &stream_.mutex );
9810   if ( stream_.state == STREAM_CLOSED ) {
9811     errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9812     error( RtAudioError::WARNING );
9816   // Invoke user callback to get fresh output data.
9817   int doStopStream = 0;
9818   RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9819   double streamTime = getStreamTime();
9820   RtAudioStreamStatus status = 0;
// Report and clear any under/overflow detected on the previous cycle.
9821   if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9822     status |= RTAUDIO_OUTPUT_UNDERFLOW;
9823     handle->xrun[0] = false;
9825   if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9826     status |= RTAUDIO_INPUT_OVERFLOW;
9827     handle->xrun[1] = false;
9829   doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9830                            stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9831   if ( doStopStream == 2 ) {
9832     this->abortStream();
9836   MUTEX_LOCK( &stream_.mutex );
9838   // The state might change while waiting on a mutex.
9839   if ( stream_.state == STREAM_STOPPED ) goto unlock;
9844   RtAudioFormat format;
9846   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9848     // Setup parameters and do buffer conversion if necessary.
9849     if ( stream_.doConvertBuffer[0] ) {
9850       buffer = stream_.deviceBuffer;
9851       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9852       samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9853       format = stream_.deviceFormat[0];
9856       buffer = stream_.userBuffer[0];
9857       samples = stream_.bufferSize * stream_.nUserChannels[0];
9858       format = stream_.userFormat;
9861     // Do byte swapping if necessary.
9862     if ( stream_.doByteSwap[0] )
9863       byteSwapBuffer( buffer, samples, format );
// First duplex write: prime the device, then enable input and output
// triggers atomically so capture and playback start in sync.
9865     if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9867       ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9868       result = write( handle->id[0], buffer, samples * formatBytes(format) );
9869       trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9870       ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9871       handle->triggered = true;
9874       // Write samples to device.
9875       result = write( handle->id[0], buffer, samples * formatBytes(format) );
9877     if ( result == -1 ) {
9878       // We'll assume this is an underrun, though there isn't a
9879       // specific means for determining that.
9880       handle->xrun[0] = true;
9881       errorText_ = "RtApiOss::callbackEvent: audio write error.";
9882       error( RtAudioError::WARNING );
9883       // Continue on to input section.
9887   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9889     // Setup parameters.
9890     if ( stream_.doConvertBuffer[1] ) {
9891       buffer = stream_.deviceBuffer;
9892       samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9893       format = stream_.deviceFormat[1];
9896       buffer = stream_.userBuffer[1];
9897       samples = stream_.bufferSize * stream_.nUserChannels[1];
9898       format = stream_.userFormat;
9901     // Read samples from device.
9902     result = read( handle->id[1], buffer, samples * formatBytes(format) );
9904     if ( result == -1 ) {
9905       // We'll assume this is an overrun, though there isn't a
9906       // specific means for determining that.
9907       handle->xrun[1] = true;
9908       errorText_ = "RtApiOss::callbackEvent: audio read error.";
9909       error( RtAudioError::WARNING );
9913     // Do byte swapping if necessary.
9914     if ( stream_.doByteSwap[1] )
9915       byteSwapBuffer( buffer, samples, format );
9917     // Do buffer conversion if necessary.
9918     if ( stream_.doConvertBuffer[1] )
9919       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9923   MUTEX_UNLOCK( &stream_.mutex );
9925   RtApi::tickStreamTime();
9926   if ( doStopStream == 1 ) this->stopStream();
9929 static void *ossCallbackHandler( void *ptr )
9931 CallbackInfo *info = (CallbackInfo *) ptr;
9932 RtApiOss *object = (RtApiOss *) info->object;
9933 bool *isRunning = &info->isRunning;
9935 #ifdef SCHED_RR // Undefined with some OSes (e.g. NetBSD 1.6.x with GNU Pthread)
9936 if (info->doRealtime) {
9937 std::cerr << "RtAudio oss: " <<
9938 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9939 "running realtime scheduling" << std::endl;
9943 while ( *isRunning == true ) {
9944 pthread_testcancel();
9945 object->callbackEvent();
9948 pthread_exit( NULL );
9951 //******************** End of __LINUX_OSS__ *********************//
9955 // *************************************************** //
9957 // Protected common (OS-independent) RtAudio methods.
9959 // *************************************************** //
9961 // This method can be modified to control the behavior of error
9962 // message printing.
9963 void RtApi :: error( RtAudioError::Type type )
// Central error reporter for all APIs.  If a user error callback is
// registered it is invoked with the current errorText_; otherwise WARNINGs
// are printed to stderr (when showWarnings_ is set) and anything stronger
// is thrown as an RtAudioError exception.
9965 errorStream_.str(""); // clear the ostringstream
9967 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9968 if ( errorCallback ) {
9969 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9971 if ( firstErrorOccurred_ )
9974 firstErrorOccurred_ = true;
// Copy the message before any reentrant error() call can overwrite errorText_.
9975 const std::string errorMessage = errorText_;
// Non-warning errors on an active stream also signal the callback thread
// to terminate by clearing its isRunning flag.
9977 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9978 stream_.callbackInfo.isRunning = false; // exit from the thread
9982 errorCallback( type, errorMessage );
9983 firstErrorOccurred_ = false;
9987 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9988 std::cerr << '\n' << errorText_ << "\n\n";
9989 else if ( type != RtAudioError::WARNING )
9990 throw( RtAudioError( errorText_, type ) );
9993 void RtApi :: verifyStream()
// Guard used by public stream methods: reports INVALID_USE (which throws
// via error()) when no stream is currently open.
9995 if ( stream_.state == STREAM_CLOSED ) {
9996 errorText_ = "RtApi:: a stream is not open!";
9997 error( RtAudioError::INVALID_USE );
10001 void RtApi :: clearStreamInfo()
// Resets the stream_ structure to its pristine (closed) state, including
// both per-direction slots (index 0 = output, 1 = input, as used throughout
// this file) and their ConvertInfo contents.
10003 stream_.mode = UNINITIALIZED;
10004 stream_.state = STREAM_CLOSED;
10005 stream_.sampleRate = 0;
10006 stream_.bufferSize = 0;
10007 stream_.nBuffers = 0;
10008 stream_.userFormat = 0;
10009 stream_.userInterleaved = true;
10010 stream_.streamTime = 0.0;
10011 stream_.apiHandle = 0;
10012 stream_.deviceBuffer = 0;
10013 stream_.callbackInfo.callback = 0;
10014 stream_.callbackInfo.userData = 0;
10015 stream_.callbackInfo.isRunning = false;
10016 stream_.callbackInfo.errorCallback = 0;
10017 for ( int i=0; i<2; i++ ) {
// 11111 appears to be a sentinel for "no device selected" -- TODO confirm
// against the device-id handling elsewhere in the file.
10018 stream_.device[i] = 11111;
10019 stream_.doConvertBuffer[i] = false;
10020 stream_.deviceInterleaved[i] = true;
10021 stream_.doByteSwap[i] = false;
10022 stream_.nUserChannels[i] = 0;
10023 stream_.nDeviceChannels[i] = 0;
10024 stream_.channelOffset[i] = 0;
10025 stream_.deviceFormat[i] = 0;
10026 stream_.latency[i] = 0;
10027 stream_.userBuffer[i] = 0;
10028 stream_.convertInfo[i].channels = 0;
10029 stream_.convertInfo[i].inJump = 0;
10030 stream_.convertInfo[i].outJump = 0;
10031 stream_.convertInfo[i].inFormat = 0;
10032 stream_.convertInfo[i].outFormat = 0;
10033 stream_.convertInfo[i].inOffset.clear();
10034 stream_.convertInfo[i].outOffset.clear();
10038 unsigned int RtApi :: formatBytes( RtAudioFormat format )
// Returns the byte width of one sample of `format`; an unrecognized format
// is reported as a WARNING.  (The return-value lines are not visible in
// this excerpt -- presumably 2/4/8/3/1 bytes for the branches below, with 0
// on the error path; verify against the full source.)
10040 if ( format == RTAUDIO_SINT16 )
10042 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
10044 else if ( format == RTAUDIO_FLOAT64 )
10046 else if ( format == RTAUDIO_SINT24 )
10048 else if ( format == RTAUDIO_SINT8 )
10051 errorText_ = "RtApi::formatBytes: undefined format.";
10052 error( RtAudioError::WARNING );
10057 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
// Precomputes the ConvertInfo (formats, strides, channel count and the
// per-channel in/out offset tables) that convertBuffer() uses for the given
// stream direction, including any interleave/deinterleave mapping and a
// firstChannel displacement.
10059 if ( mode == INPUT ) { // convert device to user buffer
10060 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
10061 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
10062 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
10063 stream_.convertInfo[mode].outFormat = stream_.userFormat;
10065 else { // convert user to device buffer
10066 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
10067 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
10068 stream_.convertInfo[mode].inFormat = stream_.userFormat;
10069 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Convert only as many channels as both sides have in common.
10072 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
10073 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
10075 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
10077 // Set up the interleave/deinterleave offsets.
10078 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
10079 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
10080 ( mode == INPUT && stream_.userInterleaved ) ) {
// Non-interleaved source -> interleaved destination: source channels sit
// bufferSize samples apart, destination channels are adjacent.
10081 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10082 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10083 stream_.convertInfo[mode].outOffset.push_back( k );
10084 stream_.convertInfo[mode].inJump = 1;
// Interleaved source -> non-interleaved destination: the mirror case.
10088 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10089 stream_.convertInfo[mode].inOffset.push_back( k );
10090 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10091 stream_.convertInfo[mode].outJump = 1;
10095 else { // no (de)interleaving
10096 if ( stream_.userInterleaved ) {
10097 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10098 stream_.convertInfo[mode].inOffset.push_back( k );
10099 stream_.convertInfo[mode].outOffset.push_back( k );
10103 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
10104 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
10105 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
10106 stream_.convertInfo[mode].inJump = 1;
10107 stream_.convertInfo[mode].outJump = 1;
10112 // Add channel offset.
10113 if ( firstChannel > 0 ) {
// For interleaved data a channel offset is a sample offset; for
// non-interleaved data it is a whole-buffer (bufferSize) displacement.
10114 if ( stream_.deviceInterleaved[mode] ) {
10115 if ( mode == OUTPUT ) {
10116 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10117 stream_.convertInfo[mode].outOffset[k] += firstChannel;
10120 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10121 stream_.convertInfo[mode].inOffset[k] += firstChannel;
10125 if ( mode == OUTPUT ) {
10126 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10127 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
10130 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
10131 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
10137 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
10139 // This function does format conversion, input/output channel compensation, and
10140 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
10141 // the lower three bytes of a 32-bit integer.
// Dispatch is on info.outFormat first, then info.inFormat.  Each pairing
// walks stream_.bufferSize frames, mapping channels through the
// inOffset/outOffset tables and advancing by inJump/outJump per frame
// (all precomputed in setConvertInfo()).
10143 // Clear our device buffer when in/out duplex device channels are different
10144 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
10145 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
10146 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
// Output: 64-bit float.  Integer inputs are offset by +0.5 and scaled so
// full scale maps into roughly [-1.0, 1.0).
10149 if (info.outFormat == RTAUDIO_FLOAT64) {
10151 Float64 *out = (Float64 *)outBuffer;
10153 if (info.inFormat == RTAUDIO_SINT8) {
10154 signed char *in = (signed char *)inBuffer;
10155 scale = 1.0 / 127.5;
10156 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10157 for (j=0; j<info.channels; j++) {
10158 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10159 out[info.outOffset[j]] += 0.5;
10160 out[info.outOffset[j]] *= scale;
10163 out += info.outJump;
10166 else if (info.inFormat == RTAUDIO_SINT16) {
10167 Int16 *in = (Int16 *)inBuffer;
10168 scale = 1.0 / 32767.5;
10169 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10170 for (j=0; j<info.channels; j++) {
10171 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10172 out[info.outOffset[j]] += 0.5;
10173 out[info.outOffset[j]] *= scale;
10176 out += info.outJump;
10179 else if (info.inFormat == RTAUDIO_SINT24) {
10180 Int24 *in = (Int24 *)inBuffer;
10181 scale = 1.0 / 8388607.5;
10182 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10183 for (j=0; j<info.channels; j++) {
10184 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10185 out[info.outOffset[j]] += 0.5;
10186 out[info.outOffset[j]] *= scale;
10189 out += info.outJump;
10192 else if (info.inFormat == RTAUDIO_SINT32) {
10193 Int32 *in = (Int32 *)inBuffer;
10194 scale = 1.0 / 2147483647.5;
10195 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10196 for (j=0; j<info.channels; j++) {
10197 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10198 out[info.outOffset[j]] += 0.5;
10199 out[info.outOffset[j]] *= scale;
10202 out += info.outJump;
10205 else if (info.inFormat == RTAUDIO_FLOAT32) {
10206 Float32 *in = (Float32 *)inBuffer;
10207 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10208 for (j=0; j<info.channels; j++) {
10209 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10212 out += info.outJump;
10215 else if (info.inFormat == RTAUDIO_FLOAT64) {
10216 // Channel compensation and/or (de)interleaving only.
10217 Float64 *in = (Float64 *)inBuffer;
10218 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10219 for (j=0; j<info.channels; j++) {
10220 out[info.outOffset[j]] = in[info.inOffset[j]];
10223 out += info.outJump;
// Output: 32-bit float.  Same +0.5-then-scale normalization as FLOAT64.
10227 else if (info.outFormat == RTAUDIO_FLOAT32) {
10229 Float32 *out = (Float32 *)outBuffer;
10231 if (info.inFormat == RTAUDIO_SINT8) {
10232 signed char *in = (signed char *)inBuffer;
10233 scale = (Float32) ( 1.0 / 127.5 );
10234 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10235 for (j=0; j<info.channels; j++) {
10236 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10237 out[info.outOffset[j]] += 0.5;
10238 out[info.outOffset[j]] *= scale;
10241 out += info.outJump;
10244 else if (info.inFormat == RTAUDIO_SINT16) {
10245 Int16 *in = (Int16 *)inBuffer;
10246 scale = (Float32) ( 1.0 / 32767.5 );
10247 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10248 for (j=0; j<info.channels; j++) {
10249 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10250 out[info.outOffset[j]] += 0.5;
10251 out[info.outOffset[j]] *= scale;
10254 out += info.outJump;
10257 else if (info.inFormat == RTAUDIO_SINT24) {
10258 Int24 *in = (Int24 *)inBuffer;
10259 scale = (Float32) ( 1.0 / 8388607.5 );
10260 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10261 for (j=0; j<info.channels; j++) {
10262 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10263 out[info.outOffset[j]] += 0.5;
10264 out[info.outOffset[j]] *= scale;
10267 out += info.outJump;
10270 else if (info.inFormat == RTAUDIO_SINT32) {
10271 Int32 *in = (Int32 *)inBuffer;
10272 scale = (Float32) ( 1.0 / 2147483647.5 );
10273 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10274 for (j=0; j<info.channels; j++) {
10275 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10276 out[info.outOffset[j]] += 0.5;
10277 out[info.outOffset[j]] *= scale;
10280 out += info.outJump;
10283 else if (info.inFormat == RTAUDIO_FLOAT32) {
10284 // Channel compensation and/or (de)interleaving only.
10285 Float32 *in = (Float32 *)inBuffer;
10286 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10287 for (j=0; j<info.channels; j++) {
10288 out[info.outOffset[j]] = in[info.inOffset[j]];
10291 out += info.outJump;
10294 else if (info.inFormat == RTAUDIO_FLOAT64) {
10295 Float64 *in = (Float64 *)inBuffer;
10296 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10297 for (j=0; j<info.channels; j++) {
10298 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10301 out += info.outJump;
// Output: 32-bit signed int.  Narrower ints are left-shifted into the high
// bits; floats are scaled by 2^31 - 0.5 then truncated.
10305 else if (info.outFormat == RTAUDIO_SINT32) {
10306 Int32 *out = (Int32 *)outBuffer;
10307 if (info.inFormat == RTAUDIO_SINT8) {
10308 signed char *in = (signed char *)inBuffer;
10309 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10310 for (j=0; j<info.channels; j++) {
10311 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10312 out[info.outOffset[j]] <<= 24;
10315 out += info.outJump;
10318 else if (info.inFormat == RTAUDIO_SINT16) {
10319 Int16 *in = (Int16 *)inBuffer;
10320 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10321 for (j=0; j<info.channels; j++) {
10322 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10323 out[info.outOffset[j]] <<= 16;
10326 out += info.outJump;
10329 else if (info.inFormat == RTAUDIO_SINT24) {
10330 Int24 *in = (Int24 *)inBuffer;
10331 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10332 for (j=0; j<info.channels; j++) {
10333 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10334 out[info.outOffset[j]] <<= 8;
10337 out += info.outJump;
10340 else if (info.inFormat == RTAUDIO_SINT32) {
10341 // Channel compensation and/or (de)interleaving only.
10342 Int32 *in = (Int32 *)inBuffer;
10343 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10344 for (j=0; j<info.channels; j++) {
10345 out[info.outOffset[j]] = in[info.inOffset[j]];
10348 out += info.outJump;
10351 else if (info.inFormat == RTAUDIO_FLOAT32) {
10352 Float32 *in = (Float32 *)inBuffer;
10353 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10354 for (j=0; j<info.channels; j++) {
10355 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10358 out += info.outJump;
10361 else if (info.inFormat == RTAUDIO_FLOAT64) {
10362 Float64 *in = (Float64 *)inBuffer;
10363 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10364 for (j=0; j<info.channels; j++) {
10365 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10368 out += info.outJump;
// Output: 24-bit signed int packed in an Int24 (low three bytes of 32 bits).
10372 else if (info.outFormat == RTAUDIO_SINT24) {
10373 Int24 *out = (Int24 *)outBuffer;
10374 if (info.inFormat == RTAUDIO_SINT8) {
10375 signed char *in = (signed char *)inBuffer;
10376 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10377 for (j=0; j<info.channels; j++) {
10378 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10379 //out[info.outOffset[j]] <<= 16;
10382 out += info.outJump;
10385 else if (info.inFormat == RTAUDIO_SINT16) {
10386 Int16 *in = (Int16 *)inBuffer;
10387 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10388 for (j=0; j<info.channels; j++) {
10389 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10390 //out[info.outOffset[j]] <<= 8;
10393 out += info.outJump;
10396 else if (info.inFormat == RTAUDIO_SINT24) {
10397 // Channel compensation and/or (de)interleaving only.
10398 Int24 *in = (Int24 *)inBuffer;
10399 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10400 for (j=0; j<info.channels; j++) {
10401 out[info.outOffset[j]] = in[info.inOffset[j]];
10404 out += info.outJump;
10407 else if (info.inFormat == RTAUDIO_SINT32) {
10408 Int32 *in = (Int32 *)inBuffer;
10409 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10410 for (j=0; j<info.channels; j++) {
10411 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10412 //out[info.outOffset[j]] >>= 8;
10415 out += info.outJump;
10418 else if (info.inFormat == RTAUDIO_FLOAT32) {
10419 Float32 *in = (Float32 *)inBuffer;
10420 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10421 for (j=0; j<info.channels; j++) {
10422 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10425 out += info.outJump;
10428 else if (info.inFormat == RTAUDIO_FLOAT64) {
10429 Float64 *in = (Float64 *)inBuffer;
10430 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10431 for (j=0; j<info.channels; j++) {
10432 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10435 out += info.outJump;
// Output: 16-bit signed int.
10439 else if (info.outFormat == RTAUDIO_SINT16) {
10440 Int16 *out = (Int16 *)outBuffer;
10441 if (info.inFormat == RTAUDIO_SINT8) {
10442 signed char *in = (signed char *)inBuffer;
10443 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10444 for (j=0; j<info.channels; j++) {
10445 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10446 out[info.outOffset[j]] <<= 8;
10449 out += info.outJump;
10452 else if (info.inFormat == RTAUDIO_SINT16) {
10453 // Channel compensation and/or (de)interleaving only.
10454 Int16 *in = (Int16 *)inBuffer;
10455 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10456 for (j=0; j<info.channels; j++) {
10457 out[info.outOffset[j]] = in[info.inOffset[j]];
10460 out += info.outJump;
10463 else if (info.inFormat == RTAUDIO_SINT24) {
10464 Int24 *in = (Int24 *)inBuffer;
10465 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10466 for (j=0; j<info.channels; j++) {
10467 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10470 out += info.outJump;
10473 else if (info.inFormat == RTAUDIO_SINT32) {
10474 Int32 *in = (Int32 *)inBuffer;
10475 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10476 for (j=0; j<info.channels; j++) {
10477 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10480 out += info.outJump;
10483 else if (info.inFormat == RTAUDIO_FLOAT32) {
10484 Float32 *in = (Float32 *)inBuffer;
10485 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10486 for (j=0; j<info.channels; j++) {
10487 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10490 out += info.outJump;
10493 else if (info.inFormat == RTAUDIO_FLOAT64) {
10494 Float64 *in = (Float64 *)inBuffer;
10495 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10496 for (j=0; j<info.channels; j++) {
10497 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10500 out += info.outJump;
// Output: 8-bit signed int.
10504 else if (info.outFormat == RTAUDIO_SINT8) {
10505 signed char *out = (signed char *)outBuffer;
10506 if (info.inFormat == RTAUDIO_SINT8) {
10507 // Channel compensation and/or (de)interleaving only.
10508 signed char *in = (signed char *)inBuffer;
10509 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10510 for (j=0; j<info.channels; j++) {
10511 out[info.outOffset[j]] = in[info.inOffset[j]];
10514 out += info.outJump;
// NOTE(review): plain `if` (not `else if`) below is inconsistent with the
// sibling branches; harmless since inFormat cannot match both, but worth
// normalizing upstream.
10517 if (info.inFormat == RTAUDIO_SINT16) {
10518 Int16 *in = (Int16 *)inBuffer;
10519 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10520 for (j=0; j<info.channels; j++) {
10521 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10524 out += info.outJump;
10527 else if (info.inFormat == RTAUDIO_SINT24) {
10528 Int24 *in = (Int24 *)inBuffer;
10529 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10530 for (j=0; j<info.channels; j++) {
10531 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10534 out += info.outJump;
10537 else if (info.inFormat == RTAUDIO_SINT32) {
10538 Int32 *in = (Int32 *)inBuffer;
10539 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10540 for (j=0; j<info.channels; j++) {
10541 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10544 out += info.outJump;
10547 else if (info.inFormat == RTAUDIO_FLOAT32) {
10548 Float32 *in = (Float32 *)inBuffer;
10549 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10550 for (j=0; j<info.channels; j++) {
10551 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10554 out += info.outJump;
10557 else if (info.inFormat == RTAUDIO_FLOAT64) {
10558 Float64 *in = (Float64 *)inBuffer;
10559 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10560 for (j=0; j<info.channels; j++) {
10561 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10564 out += info.outJump;
10570 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10571 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10572 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10574 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
// Reverses the endianness of `samples` samples in place, choosing the
// per-sample byte width (2, 4, 3 or 8) from `format`.  (The actual
// swap/increment statements are not visible in this excerpt; only their
// explanatory comments remain.)
10580 if ( format == RTAUDIO_SINT16 ) {
10581 for ( unsigned int i=0; i<samples; i++ ) {
10582 // Swap 1st and 2nd bytes.
10587 // Increment 2 bytes.
10591 else if ( format == RTAUDIO_SINT32 ||
10592 format == RTAUDIO_FLOAT32 ) {
10593 for ( unsigned int i=0; i<samples; i++ ) {
10594 // Swap 1st and 4th bytes.
10599 // Swap 2nd and 3rd bytes.
10605 // Increment 3 more bytes.
10609 else if ( format == RTAUDIO_SINT24 ) {
10610 for ( unsigned int i=0; i<samples; i++ ) {
10611 // Swap 1st and 3rd bytes.
10616 // Increment 2 more bytes.
10620 else if ( format == RTAUDIO_FLOAT64 ) {
10621 for ( unsigned int i=0; i<samples; i++ ) {
10622 // Swap 1st and 8th bytes
10627 // Swap 2nd and 7th bytes
10633 // Swap 3rd and 6th bytes
10639 // Swap 4th and 5th bytes
10645 // Increment 5 more bytes.
10651 // Indentation settings for Vim and Emacs
10653 // Local Variables:
10654 // c-basic-offset: 2
10655 // indent-tabs-mode: nil
10658 // vim: et sts=2 sw=2