1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
// Static variable definitions.
// Number of entries in the SAMPLE_RATES table below -- keep in sync.
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
// Candidate sample rates (Hz) used when a device reports a min/max
// range rather than a discrete list of supported rates.
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
// Map the generic MUTEX_* macros onto Win32 critical sections when any
// of the Windows backends is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
#define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
#define MUTEX_DESTROY(A) DeleteCriticalSection(A)
#define MUTEX_LOCK(A) EnterCriticalSection(A)
#define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
// Convert a null-terminated narrow C string to a std::string.
// Returns an empty string for a NULL pointer (std::string(NULL) is
// undefined behavior, so guard explicitly).
static std::string convertCharPointerToStdString(const char *text)
{
  if ( text == NULL ) return std::string();
  return std::string(text);
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
// pthread-based mutex wrappers for the POSIX backends.
#define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
#define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
#define MUTEX_LOCK(A) pthread_mutex_lock(A)
#define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
// No-op fallbacks when no audio API is compiled in (the intervening
// #else branch is not visible in this chunk -- confirm against the
// full file).
#define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
#define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
101 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
105 // The order here will control the order of RtAudio's API search in
107 #if defined(__UNIX_JACK__)
108 apis.push_back( UNIX_JACK );
110 #if defined(__LINUX_PULSE__)
111 apis.push_back( LINUX_PULSE );
113 #if defined(__LINUX_ALSA__)
114 apis.push_back( LINUX_ALSA );
116 #if defined(__LINUX_OSS__)
117 apis.push_back( LINUX_OSS );
119 #if defined(__WINDOWS_ASIO__)
120 apis.push_back( WINDOWS_ASIO );
122 #if defined(__WINDOWS_WASAPI__)
123 apis.push_back( WINDOWS_WASAPI );
125 #if defined(__WINDOWS_DS__)
126 apis.push_back( WINDOWS_DS );
128 #if defined(__MACOSX_CORE__)
129 apis.push_back( MACOSX_CORE );
131 #if defined(__RTAUDIO_DUMMY__)
132 apis.push_back( RTAUDIO_DUMMY );
// Instantiate the RtApi subclass corresponding to 'api', if support
// for it was compiled in. On a match, rtapi_ points to the new backend;
// for an unsupported value no assignment below executes. (The reset of
// rtapi_ at function entry is in lines not visible in this chunk.)
void RtAudio :: openRtApi( RtAudio::Api api )
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
// Constructor: open the requested API, or search all compiled APIs for
// one with at least one device. Throws RtAudioError if no compiled API
// support exists at all.
RtAudio :: RtAudio( RtAudio::Api api )
  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    if ( rtapi_ ) return;

    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
// Destructor -- body (releasing the rtapi_ backend) is in lines not
// visible in this chunk.
RtAudio :: ~RtAudio()
// Thin forwarder: delegate stream creation to the selected API backend.
// All parameter validation happens in RtApi::openStream.
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
                            RtAudio::StreamParameters *inputParameters,
                            RtAudioFormat format, unsigned int sampleRate,
                            unsigned int *bufferFrames,
                            RtAudioCallback callback, void *userData,
                            RtAudio::StreamOptions *options,
                            RtAudioErrorCallback errorCallback )
  return rtapi_->openStream( outputParameters, inputParameters, format,
                             sampleRate, bufferFrames, callback,
                             userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
// RtApi constructor body (signature not visible in this chunk):
// establish a closed, uninitialized stream and create the API mutex.
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0; // playback conversion buffer
  stream_.userBuffer[1] = 0; // record conversion buffer
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
// RtApi destructor body: release the stream mutex.
  MUTEX_DESTROY( &stream_.mutex );
// Validate the user's stream parameters, probe-open the requested
// device(s) via the backend's probeDeviceOpen(), and record the
// callback info. On success the stream is left in STREAM_STOPPED.
// Invalid arguments are reported through error(INVALID_USE); probe
// failures through error(SYSTEM_ERROR). (The early-return statements
// after each error() call are in lines not visible in this chunk.)
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
  // Clear stream information potentially left from a previously open stream.
  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
  // Range-check the device indices against the backend's device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
  unsigned int iChannels = 0;
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
  // Probe-open the output side first ...
  if ( oChannels > 0 ) {
    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
  // ... then the input side; undo the output open on failure.
  if ( iChannels > 0 ) {
    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
  // Stash the user callback/data as opaque pointers for the API thread.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;
  // Report the actual buffer count back to the caller, if requested.
  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
// Base-class default: device 0. (The return statement is in a line not
// visible in this chunk.)
unsigned int RtApi :: getDefaultInputDevice( void )
  // Should be implemented in subclasses if possible.
// Base-class default: device 0. (The return statement is in a line not
// visible in this chunk.)
unsigned int RtApi :: getDefaultOutputDevice( void )
  // Should be implemented in subclasses if possible.
// Pure-virtual-in-spirit placeholder; each backend overrides this.
void RtApi :: closeStream( void )
  // MUST be implemented in subclasses!
// Placeholder probe/open; each backend overrides this. (The failure
// return value is in a line not visible in this chunk.)
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
  // MUST be implemented in subclasses!
// Advance the stream clock by one buffer's duration and record a
// wall-clock timestamp for interpolation in getStreamTime().
void RtApi :: tickStreamTime( void )
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.
  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
// Sum the output and/or input hardware latencies recorded by the
// backend, according to the open stream mode. (The declaration checks
// and return statement fall in lines not visible in this chunk.)
long RtApi :: getStreamLatency( void )
  long totalLatency = 0;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
    totalLatency = stream_.latency[0]; // playback latency (frames)
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
    totalLatency += stream_.latency[1]; // record latency (frames)
// Return the elapsed stream time in seconds. When gettimeofday is
// available, interpolate between buffer ticks using the timestamp
// stored by tickStreamTime(). ('then'/'now' timeval declarations are
// in lines not visible in this chunk.)
double RtApi :: getStreamTime( void )
#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;
  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
  return stream_.streamTime;
// Reset the stream clock to 'time' (seconds) and re-anchor the
// wall-clock tick timestamp so interpolation stays consistent.
void RtApi :: setStreamTime( double time )
  stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
// Report the sample rate the open stream is actually running at
// (stream-verification lines are not visible in this chunk).
unsigned int RtApi :: getStreamSampleRate( void )
  return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
// A structure to hold various information related to the CoreAudio API
// (the 'struct CoreHandle {' opener, pthread id, deviceBuffer and
// xrun fields fall in lines not visible in this chunk).
  AudioDeviceID id[2];    // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.
  // Default-constructor init list: one stream per direction, no xruns.
  :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Constructor: on OS X 10.6+ the HAL run loop must be explicitly set
// (to NULL here) or device property queries/notifications misbehave.
RtApiCore:: RtApiCore()
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6. If not called, queries and
  // updates to various audio device properties are not handled
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
// Destructor: close any open stream while the derived vtable is still
// valid.
RtApiCore :: ~RtApiCore()
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query the HAL for the size of the device-ID array and derive the
// device count from it. ('dataSize' is declared in a line not visible
// in this chunk.) Returns 0 on HAL error (warning issued).
unsigned int RtApiCore :: getDeviceCount( void )
  // Find out how many audio devices there are, if any.
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
  return dataSize / sizeof( AudioDeviceID );
// Map the system default input AudioDeviceID back to RtAudio's device
// index by searching the full device list. ('id' is declared in a line
// not visible in this chunk.) NOTE: the variable-length array below is
// a compiler extension, not standard C++.
unsigned int RtApiCore :: getDefaultInputDevice( void )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0; // with 0 or 1 device the answer is trivially 0
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;
  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Map the system default output AudioDeviceID back to RtAudio's device
// index by searching the full device list. ('id' is declared in a line
// not visible in this chunk.) NOTE: the variable-length array below is
// a compiler extension, not standard C++.
unsigned int RtApiCore :: getDefaultOutputDevice( void )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0; // with 0 or 1 device the answer is trivially 0
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;
  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
// Build a DeviceInfo record (name, channel counts, sample rates,
// native format, default flags) for the CoreAudio device at index
// 'device'. Errors are reported as warnings; early-return statements
// and some declarations (e.g. 'cfname', free()/CFRelease() of the
// temporary buffers) fall in lines not visible in this chunk.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
  // Fetch the full device-ID list and select the requested entry.
  // NOTE: variable-length array -- a compiler extension.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
  AudioDeviceID id = deviceList[ device ];
  // Get the device name: "<manufacturer>: <device name>".
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // Worst case for UTF-8 conversion of a CFString is 3 bytes/char + NUL.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)name, strlen(name) );
  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Get output channel information: sum channels across all streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
  // Probe the device sample rates (use the input scope only when the
  // device has no outputs).
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;
  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // The sample rate reporting mechanism is a bit of a mystery. It
  // seems that it can either return individual rates or a range of
  // rates. I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );
      // Prefer the highest rate that does not exceed 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
  // Expand a min/max range into the discrete SAMPLE_RATES candidates.
  if ( haveValueRange ) {
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );
        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;
  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// HAL IOProc trampoline: recover the RtApiCore instance from the
// opaque CallbackInfo and run one buffer cycle. ('infoPointer' is the
// final parameter, declared in a line not visible in this chunk.)
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
  CallbackInfo *info = (CallbackInfo *) infoPointer;
  RtApiCore *object = (RtApiCore *) info->object;
  // A false return from callbackEvent signals a fatal stream error.
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
    return kAudioHardwareUnspecifiedError;
  return kAudioHardwareNoError;
// Property listener: record processor-overload (xrun) notifications in
// the CoreHandle flags; [1] = input side, [0] = output side. (The
// 'nAddresses' parameter and the 'else' keyword fall in lines not
// visible in this chunk.)
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
  CoreHandle *handle = (CoreHandle *) handlePointer;
  for ( UInt32 i=0; i<nAddresses; i++ ) {
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
        handle->xrun[1] = true;
        handle->xrun[0] = true;
  return kAudioHardwareNoError;
// Property listener: read the device's current nominal sample rate
// into the Float64 pointed to by the client data. ('ratePointer' is
// the final parameter, declared in a line not visible in this chunk.)
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
884 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
885 unsigned int firstChannel, unsigned int sampleRate,
886 RtAudioFormat format, unsigned int *bufferSize,
887 RtAudio::StreamOptions *options )
890 unsigned int nDevices = getDeviceCount();
891 if ( nDevices == 0 ) {
892 // This should not happen because a check is made before this function is called.
893 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
897 if ( device >= nDevices ) {
898 // This should not happen because a check is made before this function is called.
899 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
903 AudioDeviceID deviceList[ nDevices ];
904 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
905 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
906 kAudioObjectPropertyScopeGlobal,
907 kAudioObjectPropertyElementMaster };
908 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
909 0, NULL, &dataSize, (void *) &deviceList );
910 if ( result != noErr ) {
911 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
915 AudioDeviceID id = deviceList[ device ];
917 // Setup for stream mode.
918 bool isInput = false;
919 if ( mode == INPUT ) {
921 property.mScope = kAudioDevicePropertyScopeInput;
924 property.mScope = kAudioDevicePropertyScopeOutput;
926 // Get the stream "configuration".
927 AudioBufferList *bufferList = nil;
929 property.mSelector = kAudioDevicePropertyStreamConfiguration;
930 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
931 if ( result != noErr || dataSize == 0 ) {
932 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
933 errorText_ = errorStream_.str();
937 // Allocate the AudioBufferList.
938 bufferList = (AudioBufferList *) malloc( dataSize );
939 if ( bufferList == NULL ) {
940 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
944 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
945 if (result != noErr || dataSize == 0) {
947 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
948 errorText_ = errorStream_.str();
952 // Search for one or more streams that contain the desired number of
953 // channels. CoreAudio devices can have an arbitrary number of
954 // streams and each stream can have an arbitrary number of channels.
955 // For each stream, a single buffer of interleaved samples is
956 // provided. RtAudio prefers the use of one stream of interleaved
957 // data or multiple consecutive single-channel streams. However, we
958 // now support multiple consecutive multi-channel streams of
959 // interleaved data as well.
960 UInt32 iStream, offsetCounter = firstChannel;
961 UInt32 nStreams = bufferList->mNumberBuffers;
962 bool monoMode = false;
963 bool foundStream = false;
965 // First check that the device supports the requested number of
967 UInt32 deviceChannels = 0;
968 for ( iStream=0; iStream<nStreams; iStream++ )
969 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
971 if ( deviceChannels < ( channels + firstChannel ) ) {
973 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
974 errorText_ = errorStream_.str();
978 // Look for a single stream meeting our needs.
979 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
980 for ( iStream=0; iStream<nStreams; iStream++ ) {
981 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
982 if ( streamChannels >= channels + offsetCounter ) {
983 firstStream = iStream;
984 channelOffset = offsetCounter;
988 if ( streamChannels > offsetCounter ) break;
989 offsetCounter -= streamChannels;
992 // If we didn't find a single stream above, then we should be able
993 // to meet the channel specification with multiple streams.
994 if ( foundStream == false ) {
996 offsetCounter = firstChannel;
997 for ( iStream=0; iStream<nStreams; iStream++ ) {
998 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
999 if ( streamChannels > offsetCounter ) break;
1000 offsetCounter -= streamChannels;
1003 firstStream = iStream;
1004 channelOffset = offsetCounter;
1005 Int32 channelCounter = channels + offsetCounter - streamChannels;
1007 if ( streamChannels > 1 ) monoMode = false;
1008 while ( channelCounter > 0 ) {
1009 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1010 if ( streamChannels > 1 ) monoMode = false;
1011 channelCounter -= streamChannels;
1018 // Determine the buffer size.
1019 AudioValueRange bufferRange;
1020 dataSize = sizeof( AudioValueRange );
1021 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1022 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1024 if ( result != noErr ) {
1025 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1026 errorText_ = errorStream_.str();
1030 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1031 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1032 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
// Close an open stream: remove the processor-overload (xrun) property
// listeners registered for each device, stop any running device,
// destroy/remove the device IOProcs, free the user and device buffers
// and the pthread condition variable, and mark the stream CLOSED.
// NOTE(review): some physical lines (closing braces, `delete handle;`)
// are elided in this extraction of the file.
1403 void RtApiCore :: closeStream( void )
// Guard: closing a never-opened/already-closed stream is only a WARNING.
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// --- Tear down the output-side device (handle->id[0]). ---
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
// Retarget the address at the overload (xrun) notification; the listener
// (xrunListener, with `handle` as client data) was added at open time.
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
// Stop the device before disposing of its IOProc.
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
// On 10.5+ use the IOProcID API; the AddIOProc/RemoveIOProc pair is deprecated.
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// --- Tear down the input-side device (handle->id[1]) when it was opened
// separately (INPUT mode, or DUPLEX across two distinct devices). ---
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// Free the per-mode user buffers (index 0 = output, 1 = input).
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
// Free the shared device (conversion) buffer, if one was allocated.
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
// Drop the CoreHandle and reset the stream bookkeeping.
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
// Begin audio i/o on an open stream: call AudioDeviceStart() on the
// device(s) in use, reset the drain bookkeeping, and mark the stream
// RUNNING.  Already-running is only a WARNING; a CoreAudio failure
// falls through to error( RtAudioError::SYSTEM_ERROR ) at the bottom.
1479 void RtApiCore :: startStream( void )
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Start the output-side device (handle->id[0]) for OUTPUT/DUPLEX modes.
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
// Start the input-side device (handle->id[1]) when it is separate from
// the output device (INPUT mode, or DUPLEX across two devices).
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
// Reset the drain handshake used by callbackEvent()/stopStream().
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
// Success exits here; any recorded failure is raised as a system error.
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream.  For output/duplex streams the output is first
// drained: drainCounter is set and this thread blocks on the handle's
// condition variable until callbackEvent() signals completion; then
// AudioDeviceStop() is called on each started device.
1520 void RtApiCore :: stopStream( void )
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means no drain is already in progress (abortStream
// sets it to 2 itself): request a drain and wait to be signaled by the
// audio callback (see callbackEvent's pthread_cond_signal path).
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
// Stop the input device when it is distinct from the output device.
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
// Success exits here; any recorded failure is raised as a system error.
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream without waiting for pending output to play out.
// Setting drainCounter to 2 makes callbackEvent() write zeros to the
// output buffers and initiate the stop sequence (the elided tail of
// this function presumably completes the stop — confirm against the
// full source).
1563 void RtApiCore :: abortStream( void )
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
1589 pthread_exit( NULL );
// Per-buffer workhorse invoked (via the registered IOProc) for each
// CoreAudio cycle on either device.  It (1) manages the drain/stop
// handshake with stopStream()/abortStream(), (2) invokes the user
// callback to produce/consume audio, and (3) shuttles samples between
// the user/device buffers and the CoreAudio AudioBufferLists, handling
// format conversion, channel offsets, mono (non-interleaved) mode, and
// multiple multi-channel streams.  Returns SUCCESS/FAILURE.
// NOTE(review): a number of physical lines (braces, returns, mutex
// handling) are elided in this extraction of the file.
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
// Quietly succeed while stopped/stopping; a CLOSED stream is a bug.
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal is finished.
// drainCounter is incremented once per callback while draining (see
// below); past 3 the drain is done: either spawn coreStopStream (the
// callback itself requested the stop) or wake the stopStream() caller
// blocked on the condition variable.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
// Report (and clear) any overload flags set by xrunListener.
1626 RtAudioStreamStatus status = 0;
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
// User callback return: 2 = abort (drain immediately with zeros),
// 1 = stop after draining the user's final buffer.
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
// ---------------- OUTPUT side ----------------
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
// Single CoreAudio stream: convert or copy straight into its buffer.
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
// Source is the user buffer, or the device buffer after conversion.
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: one single-channel stream per user channel.
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
// inOffset is the stride between successive channels of one frame in
// the source buffer: 1 when interleaved, bufferSize when planar.
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
// Copy frame-by-frame into this stream's interleaved buffer.
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
// While draining, just advance the counter toward the > 3 threshold.
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
// ---------------- INPUT side ----------------
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
// Destination is the user buffer, or the device buffer when a later
// conversion into the user format is needed (see bottom of this branch).
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
// Mirror of the output path: outOffset is the per-frame channel stride
// in the destination (1 interleaved, bufferSize planar).
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
// Copy frame-by-frame out of this stream's interleaved buffer.
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
// Final conversion from the interleaved device buffer to user format.
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream's running time by one buffer period.
1848 RtApi::tickStreamTime();
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
// jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
// jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
1935 jack_client_t *client;
1936 jack_port_t **ports[2];
1937 std::string deviceName[2];
1939 pthread_cond_t condition;
1940 int drainCounter; // Tracks callback counts when draining
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed in the RtApiJack constructor to
// suppress JACK's default error printing in non-debug builds.
static void jackSilentError( const char * ) {};
1951 RtApiJack :: RtApiJack()
1952 :shouldAutoconnect_(true) {
1953 // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955 // Turn off Jack's internal error reporting.
1956 jack_set_error_function( &jackSilentError );
1960 RtApiJack :: ~RtApiJack()
1962 if ( stream_.state != STREAM_CLOSED ) closeStream();
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967 // See if we can become a jack client.
1968 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969 jack_status_t *status = NULL;
1970 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971 if ( client == 0 ) return 0;
1974 std::string port, previousPort;
1975 unsigned int nChannels = 0, nDevices = 0;
1976 ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );
1978 // Parse the port names up to the first colon (:).
1981 port = (char *) ports[ nChannels ];
1982 iColon = port.find(":");
1983 if ( iColon != std::string::npos ) {
1984 port = port.substr( 0, iColon + 1 );
1985 if ( port != previousPort ) {
1987 previousPort = port;
1990 } while ( ports[++nChannels] );
1994 jack_client_close( client );
// Probe the JACK "device" with the given index: connect a temporary
// client, map the index to a client-name prefix, then count that
// client's ports to determine channel support.  On failure, returns
// an info struct with probed == false after reporting a warning.
RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;  // remains false on any early-out below

  jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
  jack_status_t *status = NULL;
  jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
  if ( client == 0 ) {
    errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
    error( RtAudioError::WARNING );

  std::string port, previousPort;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );

  // Parse the port names up to the first colon (:).
  // Each distinct prefix is one device; the prefix matching the
  // requested index becomes info.name.
    port = (char *) ports[ nPorts ];
    iColon = port.find(":");
    if ( iColon != std::string::npos ) {
      port = port.substr( 0, iColon );
      if ( port != previousPort ) {
        if ( nDevices == device ) info.name = port;
        previousPort = port;
  } while ( ports[++nPorts] );

  if ( device >= nDevices ) {
    jack_client_close( client );
    errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // Get the current jack server sample rate.
  // JACK fixes the rate server-wide, so it is the only one reported.
  info.sampleRates.clear();

  info.preferredSampleRate = jack_get_sample_rate( client );
  info.sampleRates.push_back( info.preferredSampleRate );

  // Count the available ports containing the client name as device
  // channels.  Jack "input ports" equal RtAudio output channels.
  unsigned int nChannels = 0;
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput );
    while ( ports[ nChannels ] ) nChannels++;
    info.outputChannels = nChannels;

  // Jack "output ports" equal RtAudio input channels.
  ports = jack_get_ports( client, info.name.c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    while ( ports[ nChannels ] ) nChannels++;
    info.inputChannels = nChannels;

  if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
    jack_client_close(client);
    errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
    error( RtAudioError::WARNING );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Jack always uses 32-bit floats.
  info.nativeFormats = RTAUDIO_FLOAT32;

  // Jack doesn't provide default devices so we'll use the first available one.
  if ( device == 0 && info.outputChannels > 0 )
    info.isDefaultOutput = true;
  if ( device == 0 && info.inputChannels > 0 )
    info.isDefaultInput = true;

  jack_client_close(client);
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093 CallbackInfo *info = (CallbackInfo *) infoPointer;
2095 RtApiJack *object = (RtApiJack *) info->object;
2096 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
2105 static void *jackCloseStream( void *ptr )
2107 CallbackInfo *info = (CallbackInfo *) ptr;
2108 RtApiJack *object = (RtApiJack *) info->object;
2110 object->closeStream();
2112 pthread_exit( NULL );
2114 static void jackShutdown( void *infoPointer )
2116 CallbackInfo *info = (CallbackInfo *) infoPointer;
2117 RtApiJack *object = (RtApiJack *) info->object;
2119 // Check current stream state. If stopped, then we'll assume this
2120 // was called as a result of a call to RtApiJack::stopStream (the
2121 // deactivation of a client handle causes this function to be called).
2122 // If not, we'll assume the Jack server is shutting down or some
2123 // other problem occurred and we should close the stream.
2124 if ( object->isStreamRunning() == false ) return;
2126 ThreadHandle threadId;
2127 pthread_create( &threadId, NULL, jackCloseStream, info );
2128 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2131 static int jackXrun( void *infoPointer )
2133 JackHandle *handle = *((JackHandle **) infoPointer);
2135 if ( handle->ports[0] ) handle->xrun[0] = true;
2136 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open one direction (OUTPUT or INPUT) of a JACK stream: connect (or
// reuse) the client, validate the device index / channel counts /
// sample rate against the server, allocate user and conversion
// buffers, register our ports, and install the process, xrun, and
// shutdown callbacks.  Returns FAILURE after releasing any partially
// allocated resources.
bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  JackHandle *handle = (JackHandle *) stream_.apiHandle;

  // Look for jack server and try to become a client (only do once per stream).
  jack_client_t *client = 0;
  if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
    jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
    jack_status_t *status = NULL;
    if ( options && !options->streamName.empty() )
      client = jack_client_open( options->streamName.c_str(), jackoptions, status );
      client = jack_client_open( "RtApiJack", jackoptions, status );
    if ( client == 0 ) {
      errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
      error( RtAudioError::WARNING );
    // The handle must have been created on an earlier pass.
    client = handle->client;

  std::string port, previousPort, deviceName;
  unsigned int nPorts = 0, nDevices = 0;
  ports = jack_get_ports( client, NULL, JACK_DEFAULT_AUDIO_TYPE, 0 );

  // Parse the port names up to the first colon (:).
  // Map the device index to a JACK client-name prefix, as in
  // getDeviceCount()/getDeviceInfo().
    port = (char *) ports[ nPorts ];
    iColon = port.find(":");
    if ( iColon != std::string::npos ) {
      port = port.substr( 0, iColon );
      if ( port != previousPort ) {
        if ( nDevices == device ) deviceName = port;
        previousPort = port;
  } while ( ports[++nPorts] );

  if ( device >= nDevices ) {
    errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";

  // Ports of the opposite direction on the target client are what we
  // will connect to: our outputs go to its inputs and vice versa.
  unsigned long flag = JackPortIsInput;
  if ( mode == INPUT ) flag = JackPortIsOutput;

  if ( ! (options && (options->flags & RTAUDIO_JACK_DONT_CONNECT)) ) {
    // Count the available ports containing the client name as device
    // channels.  Jack "input ports" equal RtAudio output channels.
    unsigned int nChannels = 0;
    ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
      while ( ports[ nChannels ] ) nChannels++;

    // Compare the jack ports for specified client to the requested number of channels.
    if ( nChannels < (channels + firstChannel) ) {
      errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
      errorText_ = errorStream_.str();

  // Check the jack server sample rate.  JACK fixes the rate server-wide;
  // a mismatch cannot be resampled here and is a hard failure.
  unsigned int jackRate = jack_get_sample_rate( client );
  if ( sampleRate != jackRate ) {
    jack_client_close( client );
    errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
    errorText_ = errorStream_.str();
  stream_.sampleRate = jackRate;

  // Get the latency of the JACK port.
  ports = jack_get_ports( client, deviceName.c_str(), JACK_DEFAULT_AUDIO_TYPE, flag );
  if ( ports[ firstChannel ] ) {
    // Latency direction depends on stream direction.
    jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
    // the range (usually the min and max are equal)
    jack_latency_range_t latrange; latrange.min = latrange.max = 0;
    // get the latency range
    jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
    // be optimistic, use the min!
    stream_.latency[mode] = latrange.min;
    //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );

  // The jack server always uses 32-bit floating-point data.
  stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
  stream_.userFormat = format;

  if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
  else stream_.userInterleaved = true;

  // Jack always uses non-interleaved buffers.
  stream_.deviceInterleaved[mode] = false;

  // Jack always provides host byte-ordered data.
  stream_.doByteSwap[mode] = false;

  // Get the buffer size.  The buffer size and number of buffers
  // (periods) is set when the jack server is started.
  stream_.bufferSize = (int) jack_get_buffer_size( client );
  *bufferSize = stream_.bufferSize;

  stream_.nDeviceChannels[mode] = channels;
  stream_.nUserChannels[mode] = channels;

  // Set flags for buffer conversion.  A conversion pass is needed when
  // the user format or interleaving differs from JACK's native layout.
  stream_.doConvertBuffer[mode] = false;
  if ( stream_.userFormat != stream_.deviceFormat[mode] )
    stream_.doConvertBuffer[mode] = true;
  if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
       stream_.nUserChannels[mode] > 1 )
    stream_.doConvertBuffer[mode] = true;

  // Allocate our JackHandle structure for the stream.
  if ( handle == 0 ) {
      handle = new JackHandle;
    catch ( std::bad_alloc& ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";

    if ( pthread_cond_init(&handle->condition, NULL) ) {
      errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
    stream_.apiHandle = (void *) handle;
    handle->client = client;
  handle->deviceName[mode] = deviceName;

  // Allocate necessary internal buffers.
  unsigned long bufferBytes;
  bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
  stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
  if ( stream_.userBuffer[mode] == NULL ) {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";

  if ( stream_.doConvertBuffer[mode] ) {

    bool makeBuffer = true;
    if ( mode == OUTPUT )
      bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
    else { // mode == INPUT
      bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
      // For duplex streams the device buffer is shared; only grow it,
      // never shrink it below what output already requires.
      if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
        unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
        if ( bufferBytes < bytesOut ) makeBuffer = false;

      bufferBytes *= *bufferSize;
      if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
      stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
      if ( stream_.deviceBuffer == NULL ) {
        errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";

  // Allocate memory for the Jack ports (channels) identifiers.
  handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
  if ( handle->ports[mode] == NULL )  {
    errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";

  stream_.device[mode] = device;
  stream_.channelOffset[mode] = firstChannel;
  stream_.state = STREAM_STOPPED;
  stream_.callbackInfo.object = (void *) this;

  if ( stream_.mode == OUTPUT && mode == INPUT )
    // We had already set up the stream for output.
    stream_.mode = DUPLEX;
    stream_.mode = mode;
    jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
    jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
    jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );

  // Register our ports.
  if ( mode == OUTPUT ) {
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      snprintf( label, 64, "outport %d", i );
      handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      snprintf( label, 64, "inport %d", i );
      handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
                                                JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );

  // Setup the buffer conversion information structure.  We don't use
  // buffers to do channel offsets, so we override that parameter
  // here.
  if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );

  if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;

  // Error-unwind path (reached on any failure above): destroy the
  // condition variable, close the client, and free all handle and
  // stream buffers so the stream is left in a clean, closed state.
    pthread_cond_destroy( &handle->condition );
    jack_client_close( handle->client );

    if ( handle->ports[0] ) free( handle->ports[0] );
    if ( handle->ports[1] ) free( handle->ports[1] );

    stream_.apiHandle = 0;

  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;
// Close the open stream: deactivate (if running) and close the JACK
// client, free the handle, user, and device buffers, and reset the
// stream to the UNINITIALIZED/CLOSED state.  Issues a warning if no
// stream is open.
void RtApiJack :: closeStream( void )
  if ( stream_.state == STREAM_CLOSED ) {
    errorText_ = "RtApiJack::closeStream(): no open stream to close!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;

    // A running stream must be deactivated before the client is closed.
    if ( stream_.state == STREAM_RUNNING )
      jack_deactivate( handle->client );

    jack_client_close( handle->client );

    if ( handle->ports[0] ) free( handle->ports[0] );
    if ( handle->ports[1] ) free( handle->ports[1] );
    pthread_cond_destroy( &handle->condition );

    stream_.apiHandle = 0;

  // Release per-direction user buffers and the shared device buffer.
  for ( int i=0; i<2; i++ ) {
    if ( stream_.userBuffer[i] ) {
      free( stream_.userBuffer[i] );
      stream_.userBuffer[i] = 0;

  if ( stream_.deviceBuffer ) {
    free( stream_.deviceBuffer );
    stream_.deviceBuffer = 0;

  stream_.mode = UNINITIALIZED;
  stream_.state = STREAM_CLOSED;
// Activate the JACK client and (unless autoconnect was disabled via
// RTAUDIO_JACK_DONT_CONNECT) wire our registered ports to the target
// device's ports, honoring the channel offset chosen at open time.
// Reports SYSTEM_ERROR if activation or any connection fails.
void RtApiJack :: startStream( void )
  if ( stream_.state == STREAM_RUNNING ) {
    errorText_ = "RtApiJack::startStream(): the stream is already running!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  int result = jack_activate( handle->client );
    errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";

  // Get the list of available ports.
  if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput);
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";

    // Now make the port connections.  Since RtAudio wasn't designed to
    // allow the user to select particular channels of a device, we'll
    // just open the first "nChannels" ports with offset.
    for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
      if ( ports[ stream_.channelOffset[0] + i ] )
        result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
        errorText_ = "RtApiJack::startStream(): error connecting output ports!";

  if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
    ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput );
    if ( ports == NULL) {
      errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";

    // Now make the port connections.  See note above.
    for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
      if ( ports[ stream_.channelOffset[1] + i ] )
        result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
        errorText_ = "RtApiJack::startStream(): error connecting input ports!";

  // Reset drain bookkeeping for the new run.
  handle->drainCounter = 0;
  handle->internalDrain = false;
  stream_.state = STREAM_RUNNING;

  if ( result == 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully.  For output/duplex streams, start the
// two-period drain sequence and block on the handle's condition
// variable until the audio callback signals the drain is complete,
// then deactivate the client.
void RtApiJack :: stopStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

    // drainCounter == 0 means the callback has not started a drain
    // itself; kick one off and wait for callbackEvent() to signal.
    if ( handle->drainCounter == 0 ) {
      handle->drainCounter = 2;
      pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

  jack_deactivate( handle->client );
  stream_.state = STREAM_STOPPED;
// Stop the stream immediately: set drainCounter past the "drain
// needed" state so the callback zeroes output right away, then fall
// through to the normal stop path.
void RtApiJack :: abortStream( void )
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );

  JackHandle *handle = (JackHandle *) stream_.apiHandle;
  handle->drainCounter = 2;
2548 // This function will be called by a spawned thread when the user
2549 // callback function signals that the stream should be stopped or
2550 // aborted. It is necessary to handle it this way because the
2551 // callbackEvent() function must return before the jack_deactivate()
2552 // function will return.
2553 static void *jackStopStream( void *ptr )
2555 CallbackInfo *info = (CallbackInfo *) ptr;
2556 RtApiJack *object = (RtApiJack *) info->object;
2558 object->stopStream();
2559 pthread_exit( NULL );
2562 bool RtApiJack :: callbackEvent( unsigned long nframes )
2564 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2565 if ( stream_.state == STREAM_CLOSED ) {
2566 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2567 error( RtAudioError::WARNING );
2570 if ( stream_.bufferSize != nframes ) {
2571 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2572 error( RtAudioError::WARNING );
2576 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2577 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2579 // Check if we were draining the stream and signal is finished.
2580 if ( handle->drainCounter > 3 ) {
2581 ThreadHandle threadId;
2583 stream_.state = STREAM_STOPPING;
2584 if ( handle->internalDrain == true )
2585 pthread_create( &threadId, NULL, jackStopStream, info );
2587 pthread_cond_signal( &handle->condition );
2591 // Invoke user callback first, to get fresh output data.
2592 if ( handle->drainCounter == 0 ) {
2593 RtAudioCallback callback = (RtAudioCallback) info->callback;
2594 double streamTime = getStreamTime();
2595 RtAudioStreamStatus status = 0;
2596 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2597 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2598 handle->xrun[0] = false;
2600 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2601 status |= RTAUDIO_INPUT_OVERFLOW;
2602 handle->xrun[1] = false;
2604 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2605 stream_.bufferSize, streamTime, status, info->userData );
2606 if ( cbReturnValue == 2 ) {
2607 stream_.state = STREAM_STOPPING;
2608 handle->drainCounter = 2;
2610 pthread_create( &id, NULL, jackStopStream, info );
2613 else if ( cbReturnValue == 1 ) {
2614 handle->drainCounter = 1;
2615 handle->internalDrain = true;
2619 jack_default_audio_sample_t *jackbuffer;
2620 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2621 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2623 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2625 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2626 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2627 memset( jackbuffer, 0, bufferBytes );
2631 else if ( stream_.doConvertBuffer[0] ) {
2633 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2635 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2636 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2637 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2640 else { // no buffer conversion
2641 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2642 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2643 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2648 // Don't bother draining input
2649 if ( handle->drainCounter ) {
2650 handle->drainCounter++;
2654 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2656 if ( stream_.doConvertBuffer[1] ) {
2657 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2658 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2659 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2661 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2663 else { // no buffer conversion
2664 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2665 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2666 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2672 RtApi::tickStreamTime();
2675 //******************** End of __UNIX_JACK__ *********************//
2678 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2680 // The ASIO API is designed around a callback scheme, so this
2681 // implementation is similar to that used for OS-X CoreAudio and Linux
2682 // Jack. The primary constraint with ASIO is that it only allows
2683 // access to a single driver at a time. Thus, it is not possible to
2684 // have more than one simultaneous RtAudio stream.
2686 // This implementation also requires a number of external ASIO files
2687 // and a few global variables. The ASIO callback scheme does not
2688 // allow for the passing of user data, so we must create a global
2689 // pointer to our callbackInfo structure.
2691 // On unix systems, we make use of a pthread condition variable.
2692 // Since there is no equivalent in Windows, I hacked something based
2693 // on information found in
2694 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2696 #include "asiosys.h"
2698 #include "iasiothiscallresolver.h"
2699 #include "asiodrivers.h"
// File-scope ASIO state: the ASIO callback scheme cannot carry user
// data, and only one ASIO driver/stream may be active at a time, so
// these live as globals for the whole section.
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;   // set at stream open, read by bufferSwitch()
static bool asioXRun;                    // latched by the driver's xrun notification

int drainCounter;       // Tracks callback counts when draining
bool internalDrain;     // Indicates if stop is initiated from callback or not.
ASIOBufferInfo *bufferInfos;

  :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2718 // Function declarations (definitions at end of section)
2719 static const char* getAsioErrorString( ASIOError result );
2720 static void sampleRateChanged( ASIOSampleRate sRate );
2721 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM for apartment threading (required by
// ASIO), reset the driver list, and record a window handle for the
// driver's sysRef.
RtApiAsio :: RtApiAsio()
  // ASIO cannot run on a multi-threaded apartment.  You can call
  // CoInitialize beforehand, but it must be for apartment threading
  // (in which case, CoInitialize will return S_FALSE here).
  coInitialized_ = false;
  HRESULT hr = CoInitialize( NULL );
    errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
    error( RtAudioError::WARNING );
  coInitialized_ = true;

  drivers.removeCurrentDriver();
  driverInfo.asioVersion = 2;

  // See note in DirectSound implementation about GetDesktopWindow().
  driverInfo.sysRef = GetForegroundWindow();
2743 RtApiAsio :: ~RtApiAsio()
2745 if ( stream_.state != STREAM_CLOSED ) closeStream();
2746 if ( coInitialized_ ) CoUninitialize();
2749 unsigned int RtApiAsio :: getDeviceCount( void )
2751 return (unsigned int) drivers.asioGetNumDev();
// Probe ASIO driver `device`: load and init the driver, query channel
// counts, supported sample rates, and native data format.  Because
// only one ASIO driver can be loaded at a time, results saved before a
// stream was opened are returned while a stream is open.  On failure,
// returns info with probed == false after a warning.
RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // If a stream is already open, we cannot probe other devices.  Thus, use the saved results.
  if ( stream_.state != STREAM_CLOSED ) {
    if ( device >= devices_.size() ) {
      errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
      error( RtAudioError::WARNING );
    return devices_[ device ];

  char driverName[32];
  ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.name = driverName;

  if ( !drivers.loadDriver( driverName ) ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  result = ASIOInit( &driverInfo );
  if ( result != ASE_OK ) {
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Determine the device channel information.
  long inputChannels, outputChannels;
  result = ASIOGetChannels( &inputChannels, &outputChannels );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  info.outputChannels = outputChannels;
  info.inputChannels = inputChannels;
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Determine the supported sample rates.
  info.sampleRates.clear();
  for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
    result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
    if ( result == ASE_OK ) {
      info.sampleRates.push_back( SAMPLE_RATES[i] );

      // Prefer the highest supported rate that is <= 48 kHz.
      if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
        info.preferredSampleRate = SAMPLE_RATES[i];

  // Determine supported data types ... just check first channel and assume rest are the same.
  ASIOChannelInfo channelInfo;
  channelInfo.channel = 0;
  channelInfo.isInput = true;
  if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
  result = ASIOGetChannelInfo( &channelInfo );
  if ( result != ASE_OK ) {
    drivers.removeCurrentDriver();
    errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Map the ASIO sample type to RtAudio's format flags (both byte
  // orders of each type map to the same RtAudio format).
  info.nativeFormats = 0;
  if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
    info.nativeFormats |= RTAUDIO_SINT16;
  else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
    info.nativeFormats |= RTAUDIO_SINT32;
  else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
    info.nativeFormats |= RTAUDIO_FLOAT32;
  else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
    info.nativeFormats |= RTAUDIO_FLOAT64;
  else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
    info.nativeFormats |= RTAUDIO_SINT24;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  // Unload the driver so other devices can be probed later.
  drivers.removeCurrentDriver();
// ASIO driver callback: invoked by the driver when buffer half `index`
// is ready for processing. Recovers the RtApiAsio instance from the
// file-global asioCallbackInfo (stored there by probeDeviceOpen) and
// forwards the buffer index to callbackEvent().
2873 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2875 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2876 object->callbackEvent( index );
// Snapshot the info for every device into devices_. Called before a
// stream is opened (see probeDeviceOpen) because getDeviceInfo() cannot
// probe drivers while an ASIO stream is running; the cached entries are
// served back by getDeviceInfo() in that situation.
2879 void RtApiAsio :: saveDeviceInfo( void )
2883 unsigned int nDevices = getDeviceCount();
2884 devices_.resize( nDevices );
2885 for ( unsigned int i=0; i<nDevices; i++ )
2886 devices_[i] = getDeviceInfo( i );
// Open and configure the ASIO driver for the requested device and mode.
//
// Steps: load the driver (skipped for the input half of a duplex open,
// which must reuse the output's driver), validate channel count and
// sample rate, detect the driver's native data format, negotiate a
// buffer size within the driver's [minSize, maxSize] constraints,
// allocate the AsioHandle + ASIOBufferInfo array, create the ASIO
// buffers with our callbacks, and set up user/device buffers and
// conversion flags.
//
// \param device       index into the ASIO driver list.
// \param mode         OUTPUT or INPUT; DUPLEX is achieved by two calls.
// \param channels     number of channels requested for this mode.
// \param firstChannel channel offset within the device.
// \param sampleRate   requested rate; set on the driver only if it differs.
// \param format       user sample format (device format detected separately).
// \param bufferSize   in/out: requested frames; updated to the size actually used.
// \param options      may carry RTAUDIO_NONINTERLEAVED.
// \return true (SUCCESS) on success; false after the "error" cleanup path.
2889 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2890 unsigned int firstChannel, unsigned int sampleRate,
2891 RtAudioFormat format, unsigned int *bufferSize,
2892 RtAudio::StreamOptions *options )
2893 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2895 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2897 // For ASIO, a duplex stream MUST use the same driver.
2898 if ( isDuplexInput && stream_.device[0] != device ) {
2899 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2903 char driverName[32];
2904 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2905 if ( result != ASE_OK ) {
2906 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2907 errorText_ = errorStream_.str();
2911 // Only load the driver once for duplex stream.
2912 if ( !isDuplexInput ) {
2913 // The getDeviceInfo() function will not work when a stream is open
2914 // because ASIO does not allow multiple devices to run at the same
2915 // time. Thus, we'll probe the system before opening a stream and
2916 // save the results for use by getDeviceInfo().
2917 this->saveDeviceInfo();
2919 if ( !drivers.loadDriver( driverName ) ) {
2920 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2921 errorText_ = errorStream_.str();
2925 result = ASIOInit( &driverInfo );
2926 if ( result != ASE_OK ) {
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2928 errorText_ = errorStream_.str();
2933 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2934 bool buffersAllocated = false;
2935 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2936 unsigned int nChannels;
2939 // Check the device channel count.
2940 long inputChannels, outputChannels;
2941 result = ASIOGetChannels( &inputChannels, &outputChannels );
2942 if ( result != ASE_OK ) {
2943 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2944 errorText_ = errorStream_.str();
2948 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2949 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2950 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2951 errorText_ = errorStream_.str();
2954 stream_.nDeviceChannels[mode] = channels;
2955 stream_.nUserChannels[mode] = channels;
2956 stream_.channelOffset[mode] = firstChannel;
2958 // Verify the sample rate is supported.
2959 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2960 if ( result != ASE_OK ) {
2961 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2962 errorText_ = errorStream_.str();
2966 // Get the current sample rate
2967 ASIOSampleRate currentRate;
// FIX: the argument had been corrupted to "¤tRate" -- "&current" was
// swallowed as the HTML entity "&curren;" (U+00A4) by an earlier encoding
// pass. Restored to the address of the local declared just above.
2968 result = ASIOGetSampleRate( &currentRate );
2969 if ( result != ASE_OK ) {
2970 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2971 errorText_ = errorStream_.str();
2975 // Set the sample rate only if necessary
2976 if ( currentRate != sampleRate ) {
2977 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2978 if ( result != ASE_OK ) {
2979 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2980 errorText_ = errorStream_.str();
2985 // Determine the driver data type.
2986 ASIOChannelInfo channelInfo;
2987 channelInfo.channel = 0;
2988 if ( mode == OUTPUT ) channelInfo.isInput = false;
2989 else channelInfo.isInput = true;
2990 result = ASIOGetChannelInfo( &channelInfo );
2991 if ( result != ASE_OK ) {
2992 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2993 errorText_ = errorStream_.str();
2997 // Assuming WINDOWS host is always little-endian.
2998 stream_.doByteSwap[mode] = false;
2999 stream_.userFormat = format;
3000 stream_.deviceFormat[mode] = 0;
3001 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3002 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3003 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3005 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3006 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3007 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3009 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3010 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3011 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3013 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3014 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3015 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3017 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3018 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3019 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3022 if ( stream_.deviceFormat[mode] == 0 ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3024 errorText_ = errorStream_.str();
3028 // Set the buffer size. For a duplex stream, this will end up
3029 // setting the buffer size based on the input constraints, which
3031 long minSize, maxSize, preferSize, granularity;
3032 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3033 if ( result != ASE_OK ) {
3034 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3035 errorText_ = errorStream_.str();
3039 if ( isDuplexInput ) {
3040 // When this is the duplex input (output was opened before), then we have to use the same
3041 // buffersize as the output, because it might use the preferred buffer size, which most
3042 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3043 // So instead of throwing an error, make them equal. The caller uses the reference
3044 // to the "bufferSize" param as usual to set up processing buffers.
3046 *bufferSize = stream_.bufferSize;
3049 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3050 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3051 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3052 else if ( granularity == -1 ) {
3053 // Make sure bufferSize is a power of two.
3054 int log2_of_min_size = 0;
3055 int log2_of_max_size = 0;
3057 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3058 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3059 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two within [minSize, maxSize] closest to the request.
3062 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3063 int min_delta_num = log2_of_min_size;
3065 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3066 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3067 if (current_delta < min_delta) {
3068 min_delta = current_delta;
3073 *bufferSize = ( (unsigned int)1 << min_delta_num );
3074 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3075 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3077 else if ( granularity != 0 ) {
3078 // Set to an even multiple of granularity, rounding up.
3079 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3084 // we don't use it anymore, see above!
3085 // Just left it here for the case...
3086 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3087 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3092 stream_.bufferSize = *bufferSize;
3093 stream_.nBuffers = 2;
3095 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3096 else stream_.userInterleaved = true;
3098 // ASIO always uses non-interleaved buffers.
3099 stream_.deviceInterleaved[mode] = false;
3101 // Allocate, if necessary, our AsioHandle structure for the stream.
3102 if ( handle == 0 ) {
3104 handle = new AsioHandle;
3106 catch ( std::bad_alloc& ) {
3107 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3110 handle->bufferInfos = 0;
3112 // Create a manual-reset event.
3113 handle->condition = CreateEvent( NULL, // no security
3114 TRUE, // manual-reset
3115 FALSE, // non-signaled initially
3117 stream_.apiHandle = (void *) handle;
3120 // Create the ASIO internal buffers. Since RtAudio sets up input
3121 // and output separately, we'll have to dispose of previously
3122 // created output buffers for a duplex stream.
3123 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3124 ASIODisposeBuffers();
3125 if ( handle->bufferInfos ) free( handle->bufferInfos );
3128 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3130 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3131 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3132 if ( handle->bufferInfos == NULL ) {
3133 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3134 errorText_ = errorStream_.str();
// Output channel infos first (index [0]), then input (index [1]).
3138 ASIOBufferInfo *infos;
3139 infos = handle->bufferInfos;
3140 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3141 infos->isInput = ASIOFalse;
3142 infos->channelNum = i + stream_.channelOffset[0];
3143 infos->buffers[0] = infos->buffers[1] = 0;
3145 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3146 infos->isInput = ASIOTrue;
3147 infos->channelNum = i + stream_.channelOffset[1];
3148 infos->buffers[0] = infos->buffers[1] = 0;
3151 // prepare for callbacks
3152 stream_.sampleRate = sampleRate;
3153 stream_.device[mode] = device;
3154 stream_.mode = isDuplexInput ? DUPLEX : mode;
3156 // store this class instance before registering callbacks, that are going to use it
3157 asioCallbackInfo = &stream_.callbackInfo;
3158 stream_.callbackInfo.object = (void *) this;
3160 // Set up the ASIO callback structure and create the ASIO data buffers.
3161 asioCallbacks.bufferSwitch = &bufferSwitch;
3162 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3163 asioCallbacks.asioMessage = &asioMessages;
3164 asioCallbacks.bufferSwitchTimeInfo = NULL;
3165 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3166 if ( result != ASE_OK ) {
3167 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3168 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3169 // in that case, let's be naïve and try that instead
3170 *bufferSize = preferSize;
3171 stream_.bufferSize = *bufferSize;
3172 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3175 if ( result != ASE_OK ) {
3176 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3177 errorText_ = errorStream_.str();
3180 buffersAllocated = true;
3181 stream_.state = STREAM_STOPPED;
3183 // Set flags for buffer conversion.
3184 stream_.doConvertBuffer[mode] = false;
3185 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3186 stream_.doConvertBuffer[mode] = true;
3187 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3188 stream_.nUserChannels[mode] > 1 )
3189 stream_.doConvertBuffer[mode] = true;
3191 // Allocate necessary internal buffers
3192 unsigned long bufferBytes;
3193 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3194 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3195 if ( stream_.userBuffer[mode] == NULL ) {
3196 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3200 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing (larger) device buffer for the duplex input half.
3202 bool makeBuffer = true;
3203 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3204 if ( isDuplexInput && stream_.deviceBuffer ) {
3205 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3206 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3210 bufferBytes *= *bufferSize;
3211 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3212 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3213 if ( stream_.deviceBuffer == NULL ) {
3214 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3220 // Determine device latencies
3221 long inputLatency, outputLatency;
3222 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3223 if ( result != ASE_OK ) {
3224 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3225 errorText_ = errorStream_.str();
3226 error( RtAudioError::WARNING); // warn but don't fail
3229 stream_.latency[0] = outputLatency;
3230 stream_.latency[1] = inputLatency;
3233 // Setup the buffer conversion information structure. We don't use
3234 // buffers to do channel offsets, so we override that parameter
3236 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// Error path: release everything acquired by this call. For the duplex
// input half the cleanup is instead performed by RtApi::openStream.
3241 if ( !isDuplexInput ) {
3242 // the cleanup for error in the duplex input, is done by RtApi::openStream
3243 // So we clean up for single channel only
3245 if ( buffersAllocated )
3246 ASIODisposeBuffers();
3248 drivers.removeCurrentDriver();
3251 CloseHandle( handle->condition );
3252 if ( handle->bufferInfos )
3253 free( handle->bufferInfos );
3256 stream_.apiHandle = 0;
3260 if ( stream_.userBuffer[mode] ) {
3261 free( stream_.userBuffer[mode] );
3262 stream_.userBuffer[mode] = 0;
3265 if ( stream_.deviceBuffer ) {
3266 free( stream_.deviceBuffer );
3267 stream_.deviceBuffer = 0;
3272 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Close the open stream: stop it if running, dispose the ASIO buffers,
// unload the driver, free the AsioHandle (its Win32 event and the
// bufferInfos array) and the user/device buffers, then mark the stream
// UNINITIALIZED/STREAM_CLOSED. Issues a WARNING (not an error) if no
// stream is open.
3274 void RtApiAsio :: closeStream()
3276 if ( stream_.state == STREAM_CLOSED ) {
3277 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3278 error( RtAudioError::WARNING );
3282 if ( stream_.state == STREAM_RUNNING ) {
3283 stream_.state = STREAM_STOPPED;
3286 ASIODisposeBuffers();
3287 drivers.removeCurrentDriver();
3289 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3291 CloseHandle( handle->condition );
3292 if ( handle->bufferInfos )
3293 free( handle->bufferInfos );
3295 stream_.apiHandle = 0;
// Free both user buffers ([0] = output, [1] = input).
3298 for ( int i=0; i<2; i++ ) {
3299 if ( stream_.userBuffer[i] ) {
3300 free( stream_.userBuffer[i] );
3301 stream_.userBuffer[i] = 0;
3305 if ( stream_.deviceBuffer ) {
3306 free( stream_.deviceBuffer );
3307 stream_.deviceBuffer = 0;
3310 stream_.mode = UNINITIALIZED;
3311 stream_.state = STREAM_CLOSED;
// File-scope flag cleared by RtApiAsio::startStream() below. Presumably
// set when a stop has been initiated from the callback thread; the
// setting site is outside this chunk -- confirm against the full file.
3314 bool stopThreadCalled = false;
// Start the open stream via ASIOStart(). On success, resets the drain
// bookkeeping in the AsioHandle, resets the stop-signalling event, and
// marks the stream RUNNING. WARNING if already running; SYSTEM_ERROR if
// ASIOStart() failed.
3316 void RtApiAsio :: startStream()
3319 if ( stream_.state == STREAM_RUNNING ) {
3320 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3321 error( RtAudioError::WARNING );
3325 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3326 ASIOError result = ASIOStart();
3327 if ( result != ASE_OK ) {
3328 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3329 errorText_ = errorStream_.str();
3333 handle->drainCounter = 0;
3334 handle->internalDrain = false;
3335 ResetEvent( handle->condition );
3336 stream_.state = STREAM_RUNNING;
// Allow a future stop request to be issued again.
3340 stopThreadCalled = false;
3342 if ( result == ASE_OK ) return;
3343 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream. For output/duplex, first requests a drain
// (drainCounter = 2) and blocks on the handle's event until
// callbackEvent() signals that the output has been zero-filled, then
// calls ASIOStop(). WARNING if already stopped; SYSTEM_ERROR if
// ASIOStop() failed.
3346 void RtApiAsio :: stopStream()
3349 if ( stream_.state == STREAM_STOPPED ) {
3350 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3351 error( RtAudioError::WARNING );
3355 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3356 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3357 if ( handle->drainCounter == 0 ) {
3358 handle->drainCounter = 2;
3359 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3363 stream_.state = STREAM_STOPPED;
3365 ASIOError result = ASIOStop();
3366 if ( result != ASE_OK ) {
3367 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3368 errorText_ = errorStream_.str();
3371 if ( result == ASE_OK ) return;
3372 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream. For ASIO this is deliberately identical to
// stopStream() (see the retained commented-out code and explanation
// below); WARNING if the stream is already stopped.
3375 void RtApiAsio :: abortStream()
3378 if ( stream_.state == STREAM_STOPPED ) {
3379 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3380 error( RtAudioError::WARNING );
3384 // The following lines were commented-out because some behavior was
3385 // noted where the device buffers need to be zeroed to avoid
3386 // continuing sound, even when the device buffers are completely
3387 // disposed. So now, calling abort is the same as calling stop.
3388 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3389 // handle->drainCounter = 2;
3393 // This function will be called by a spawned thread when the user
3394 // callback function signals that the stream should be stopped or
3395 // aborted. It is necessary to handle it this way because the
3396 // callbackEvent() function must return before the ASIOStop()
3397 // function will return.
// Thread entry point (see explanation in the comment preceding this
// function): recovers the RtApiAsio object from the CallbackInfo and
// calls stopStream() on it from outside the driver's callback context.
3398 static unsigned __stdcall asioStopStream( void *ptr )
3400 CallbackInfo *info = (CallbackInfo *) ptr;
3401 RtApiAsio *object = (RtApiAsio *) info->object;
3403 object->stopStream();
// Per-buffer processing, run from the ASIO bufferSwitch callback.
// bufferIndex selects which half of each double buffer to fill/read.
// Handles drain-to-stop signalling, invokes the user callback for fresh
// output data, converts/byte-swaps between user and device formats, and
// copies sample data channel-by-channel (ASIO buffers are per-channel,
// i.e. non-interleaved). Returns SUCCESS; early-out when the stream is
// stopped/stopping, WARNING if called on a closed stream.
3408 bool RtApiAsio :: callbackEvent( long bufferIndex )
3410 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3411 if ( stream_.state == STREAM_CLOSED ) {
3412 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3413 error( RtAudioError::WARNING );
3417 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3418 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3420 // Check if we were draining the stream and signal if finished.
3421 if ( handle->drainCounter > 3 ) {
3423 stream_.state = STREAM_STOPPING;
3424 if ( handle->internalDrain == false )
// An external stopStream() is blocked on this event -- wake it.
3425 SetEvent( handle->condition );
3426 else { // spawn a thread to stop the stream
3428 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3429 &stream_.callbackInfo, 0, &threadId );
3434 // Invoke user callback to get fresh output data UNLESS we are
3436 if ( handle->drainCounter == 0 ) {
3437 RtAudioCallback callback = (RtAudioCallback) info->callback;
3438 double streamTime = getStreamTime();
3439 RtAudioStreamStatus status = 0;
3440 if ( stream_.mode != INPUT && asioXRun == true ) {
3441 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3444 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3445 status |= RTAUDIO_INPUT_OVERFLOW;
3448 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3449 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately (stop from a spawned thread);
// return 1 = drain output first, then stop internally.
3450 if ( cbReturnValue == 2 ) {
3451 stream_.state = STREAM_STOPPING;
3452 handle->drainCounter = 2;
3454 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3455 &stream_.callbackInfo, 0, &threadId );
3458 else if ( cbReturnValue == 1 ) {
3459 handle->drainCounter = 1;
3460 handle->internalDrain = true;
3464 unsigned int nChannels, bufferBytes, i, j;
3465 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3466 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3468 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3470 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3472 for ( i=0, j=0; i<nChannels; i++ ) {
3473 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3474 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
// User data needs format/interleaving conversion before reaching the device.
3478 else if ( stream_.doConvertBuffer[0] ) {
3480 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3481 if ( stream_.doByteSwap[0] )
3482 byteSwapBuffer( stream_.deviceBuffer,
3483 stream_.bufferSize * stream_.nDeviceChannels[0],
3484 stream_.deviceFormat[0] );
3486 for ( i=0, j=0; i<nChannels; i++ ) {
3487 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3488 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3489 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
// No conversion needed: copy straight from the user buffer.
3495 if ( stream_.doByteSwap[0] )
3496 byteSwapBuffer( stream_.userBuffer[0],
3497 stream_.bufferSize * stream_.nUserChannels[0],
3498 stream_.userFormat );
3500 for ( i=0, j=0; i<nChannels; i++ ) {
3501 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3502 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3503 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3509 // Don't bother draining input
3510 if ( handle->drainCounter ) {
3511 handle->drainCounter++;
3515 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3517 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3519 if (stream_.doConvertBuffer[1]) {
3521 // Always interleave ASIO input data.
3522 for ( i=0, j=0; i<nChannels; i++ ) {
3523 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3524 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3525 handle->bufferInfos[i].buffers[bufferIndex],
3529 if ( stream_.doByteSwap[1] )
3530 byteSwapBuffer( stream_.deviceBuffer,
3531 stream_.bufferSize * stream_.nDeviceChannels[1],
3532 stream_.deviceFormat[1] );
3533 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// No conversion: copy per-channel device data directly to the user buffer.
3537 for ( i=0, j=0; i<nChannels; i++ ) {
3538 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3539 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3540 handle->bufferInfos[i].buffers[bufferIndex],
3545 if ( stream_.doByteSwap[1] )
3546 byteSwapBuffer( stream_.userBuffer[1],
3547 stream_.bufferSize * stream_.nUserChannels[1],
3548 stream_.userFormat );
3553 // The following call was suggested by Malte Clasen. While the API
3554 // documentation indicates it should not be required, some device
3555 // drivers apparently do not function correctly without it.
3558 RtApi::tickStreamTime();
// ASIO driver callback: the driver reports a sample-rate change.
// Stops the stream (via the RtApi object stored in asioCallbackInfo)
// and reports the event on std::cerr; stopStream() failures are caught
// and reported rather than propagated back into the driver.
3562 static void sampleRateChanged( ASIOSampleRate sRate )
3564 // The ASIO documentation says that this usually only happens during
3565 // external sync. Audio processing is not stopped by the driver,
3566 // actual sample rate might not have even changed, maybe only the
3567 // sample rate status of an AES/EBU or S/PDIF digital input at the
3570 RtApi *object = (RtApi *) asioCallbackInfo->object;
3572 object->stopStream();
3574 catch ( RtAudioError &exception ) {
3575 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3579 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
// ASIO driver callback: answers the driver's asioMessage() queries
// (capability checks and notifications). Dispatches on `selector`;
// notifications such as reset/latency changes are reported on std::cerr.
3582 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3586 switch( selector ) {
3587 case kAsioSelectorSupported:
3588 if ( value == kAsioResetRequest
3589 || value == kAsioEngineVersion
3590 || value == kAsioResyncRequest
3591 || value == kAsioLatenciesChanged
3592 // The following three were added for ASIO 2.0, you don't
3593 // necessarily have to support them.
3594 || value == kAsioSupportsTimeInfo
3595 || value == kAsioSupportsTimeCode
3596 || value == kAsioSupportsInputMonitor)
3599 case kAsioResetRequest:
3600 // Defer the task and perform the reset of the driver during the
3601 // next "safe" situation. You cannot reset the driver right now,
3602 // as this code is called from the driver. Reset the driver is
3603 // done by completely destruct is. I.e. ASIOStop(),
3604 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3606 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3609 case kAsioResyncRequest:
3610 // This informs the application that the driver encountered some
3611 // non-fatal data loss. It is used for synchronization purposes
3612 // of different media. Added mainly to work around the Win16Mutex
3613 // problems in Windows 95/98 with the Windows Multimedia system,
3614 // which could lose data because the Mutex was held too long by
3615 // another thread. However a driver can issue it in other
3617 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3621 case kAsioLatenciesChanged:
3622 // This will inform the host application that the drivers were
3623 // latencies changed. Beware, it this does not mean that the
3624 // buffer sizes have changed! You might need to update internal
3626 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3629 case kAsioEngineVersion:
3630 // Return the supported ASIO version of the host application. If
3631 // a host application does not implement this selector, ASIO 1.0
3632 // is assumed by the driver.
3635 case kAsioSupportsTimeInfo:
3636 // Informs the driver whether the
3637 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3638 // For compatibility with ASIO 1.0 drivers the host application
3639 // should always support the "old" bufferSwitch method, too.
3642 case kAsioSupportsTimeCode:
3643 // Informs the driver whether application is interested in time
3644 // code info. If an application does not need to know about time
3645 // code, the driver has less work to do.
// Map an ASIOError code to a human-readable message via a small static
// lookup table; returns "Unknown error." for unlisted codes. The
// returned pointer refers to static storage and must not be freed.
3652 static const char* getAsioErrorString( ASIOError result )
3660 static const Messages m[] =
3662 { ASE_NotPresent, "Hardware input or output is not present or available." },
3663 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3664 { ASE_InvalidParameter, "Invalid input parameter." },
3665 { ASE_InvalidMode, "Invalid mode." },
3666 { ASE_SPNotAdvancing, "Sample position not advancing." },
3667 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3668 { ASE_NoMemory, "Not enough memory to complete the request." }
3671 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3672 if ( m[i].value == result ) return m[i].message;
3674 return "Unknown error.";
3677 //******************** End of __WINDOWS_ASIO__ *********************//
3681 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3683 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3684 // - Introduces support for the Windows WASAPI API
3685 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3686 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3687 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3692 #include <audioclient.h>
3694 #include <mmdeviceapi.h>
3695 #include <functiondiscoverykeys_devpkey.h>
3698 #include <mferror.h>
3700 #include <Wmcodecdsp.h>
3702 #pragma comment( lib, "mfplat.lib" )
3703 #pragma comment( lib, "mfuuid.lib" )
3704 #pragma comment( lib, "wmcodecdspuuid" )
3706 //=============================================================================
3708 #define SAFE_RELEASE( objectPtr )\
3711 objectPtr->Release();\
3715 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3717 //-----------------------------------------------------------------------------
3719 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3720 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3721 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3722 // provide intermediate storage for read / write synchronization.
3736 // sets the length of the internal ring buffer
// Allocate (zero-initialized) storage for the ring buffer:
// bufferSize elements of formatBytes bytes each, and record the
// element count in bufferSize_ (indices below are in elements, not bytes).
3737 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3740 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3742 bufferSize_ = bufferSize;
3747 // attempt to push a buffer into the ring buffer at the current "in" index
// Copy `bufferSize` samples of the given format from `buffer` into the
// ring buffer at the current "in" index, wrapping at bufferSize_.
// Returns false (without copying) if the input is null/empty/too large
// or if there is not enough free space ahead of the "out" index.
// Indices are in samples of `format`, not bytes.
3748 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3750 if ( !buffer || // incoming buffer is NULL
3751 bufferSize == 0 || // incoming buffer has no data
3752 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the "out" index so the overlap test below works across the wrap point.
3757 unsigned int relOutIndex = outIndex_;
3758 unsigned int inIndexEnd = inIndex_ + bufferSize;
3759 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3760 relOutIndex += bufferSize_;
3763 // "in" index can end on the "out" index but cannot begin at it
3764 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3765 return false; // not enough space between "in" index and "out" index
3768 // copy buffer from external to internal
// fromZeroSize = samples that wrap past the end; fromInSize = samples before the wrap.
3769 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3770 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3771 int fromInSize = bufferSize - fromZeroSize;
3776 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3777 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3779 case RTAUDIO_SINT16:
3780 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3781 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3783 case RTAUDIO_SINT24:
3784 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3785 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3787 case RTAUDIO_SINT32:
3788 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3789 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3791 case RTAUDIO_FLOAT32:
3792 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3793 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3795 case RTAUDIO_FLOAT64:
3796 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3797 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3801 // update "in" index
3802 inIndex_ += bufferSize;
3803 inIndex_ %= bufferSize_;
3808 // attempt to pull a buffer from the ring buffer from the current "out" index
3809 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3811 if ( !buffer || // incoming buffer is NULL
3812 bufferSize == 0 || // incoming buffer has no data
3813 bufferSize > bufferSize_ ) // incoming buffer too large
3818 unsigned int relInIndex = inIndex_;
3819 unsigned int outIndexEnd = outIndex_ + bufferSize;
3820 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3821 relInIndex += bufferSize_;
3824 // "out" index can begin at and end on the "in" index
3825 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3826 return false; // not enough space between "out" index and "in" index
3829 // copy buffer from internal to external
3830 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3831 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3832 int fromOutSize = bufferSize - fromZeroSize;
3837 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3838 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3840 case RTAUDIO_SINT16:
3841 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3842 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3844 case RTAUDIO_SINT24:
3845 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3846 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3848 case RTAUDIO_SINT32:
3849 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3850 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3852 case RTAUDIO_FLOAT32:
3853 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3854 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3856 case RTAUDIO_FLOAT64:
3857 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3858 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3862 // update "out" index
3863 outIndex_ += bufferSize;
3864 outIndex_ %= bufferSize_;
unsigned int bufferSize_; // ring capacity, in samples (buffer_ holds bufferSize_ * formatBytes bytes)
unsigned int inIndex_;    // next write position in samples (producer side, wraps modulo bufferSize_)
unsigned int outIndex_;   // next read position in samples (consumer side, wraps modulo bufferSize_)
3876 //-----------------------------------------------------------------------------
3878 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
3879 // between HW and the user. The WasapiResampler class is used to perform this conversion between
3880 // HwIn->UserIn and UserOut->HwOut during the stream callback loop.
class WasapiResampler
// Construct a resampler around the Media Foundation resampler MFT
// (CLSID_CResamplerMediaObject), converting audio from inSampleRate to
// outSampleRate.
//   isFloat       - true selects MFAudioFormat_Float, false MFAudioFormat_PCM
//   bitsPerSample - size of one sample, in bits
//   channelCount  - interleaved channels per frame
// NOTE(review): the HRESULTs of the MF/COM calls below are not checked;
// a failed CoCreateInstance would make the later calls dereference NULL.
WasapiResampler( bool isFloat, unsigned int bitsPerSample, unsigned int channelCount,
                 unsigned int inSampleRate, unsigned int outSampleRate )
  : _bytesPerSample( bitsPerSample / 8 )
  , _channelCount( channelCount )
  , _sampleRatio( ( float ) outSampleRate / inSampleRate )
  , _transformUnk( NULL )
  , _transform( NULL )
  , _resamplerProps( NULL )
  , _mediaType( NULL )
  , _inputMediaType( NULL )
  , _outputMediaType( NULL )
// 1. Initialization: start Media Foundation (no socket/network support needed)
MFStartup( MF_VERSION, MFSTARTUP_NOSOCKET );

// 2. Create Resampler Transform Object
CoCreateInstance( CLSID_CResamplerMediaObject, NULL, CLSCTX_INPROC_SERVER,
                  IID_IUnknown, ( void** ) &_transformUnk );

_transformUnk->QueryInterface( IID_PPV_ARGS( &_transform ) );
_transformUnk->QueryInterface( IID_PPV_ARGS( &_resamplerProps ) );
_resamplerProps->SetHalfFilterLength( 60 ); // best conversion quality (60 is the maximum)

// 3. Specify input / output format: build one template media type describing
// the input stream, then copy it and patch the sample rate for the output.
MFCreateMediaType( &_mediaType );
_mediaType->SetGUID( MF_MT_MAJOR_TYPE, MFMediaType_Audio );
_mediaType->SetGUID( MF_MT_SUBTYPE, isFloat ? MFAudioFormat_Float : MFAudioFormat_PCM );
_mediaType->SetUINT32( MF_MT_AUDIO_NUM_CHANNELS, channelCount );
_mediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, inSampleRate );
_mediaType->SetUINT32( MF_MT_AUDIO_BLOCK_ALIGNMENT, _bytesPerSample * channelCount );
_mediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * inSampleRate );
_mediaType->SetUINT32( MF_MT_AUDIO_BITS_PER_SAMPLE, bitsPerSample );
_mediaType->SetUINT32( MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE );

MFCreateMediaType( &_inputMediaType );
_mediaType->CopyAllItems( _inputMediaType );

_transform->SetInputType( 0, _inputMediaType, 0 );

MFCreateMediaType( &_outputMediaType );
_mediaType->CopyAllItems( _outputMediaType );

// output differs from input only in the sample rate (and derived byte rate)
_outputMediaType->SetUINT32( MF_MT_AUDIO_SAMPLES_PER_SECOND, outSampleRate );
_outputMediaType->SetUINT32( MF_MT_AUDIO_AVG_BYTES_PER_SECOND, _bytesPerSample * channelCount * outSampleRate );

_transform->SetOutputType( 0, _outputMediaType, 0 );

// 4. Send stream start messages to Resampler so it is ready for ProcessInput
_transform->ProcessMessage( MFT_MESSAGE_COMMAND_FLUSH, NULL );
_transform->ProcessMessage( MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL );
_transform->ProcessMessage( MFT_MESSAGE_NOTIFY_START_OF_STREAM, NULL );
// Destructor body (the ~WasapiResampler() signature falls on a line not
// shown in this view): notify the MFT that streaming is over, then release
// every COM reference acquired in the constructor.
// 8. Send stream stop messages to Resampler
_transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_OF_STREAM, NULL );
_transform->ProcessMessage( MFT_MESSAGE_NOTIFY_END_STREAMING, NULL );

// release all COM references (SAFE_RELEASE macro is defined elsewhere in the file)
SAFE_RELEASE( _transformUnk );
SAFE_RELEASE( _transform );
SAFE_RELEASE( _resamplerProps );
SAFE_RELEASE( _mediaType );
SAFE_RELEASE( _inputMediaType );
SAFE_RELEASE( _outputMediaType );
// Convert inSampleCount frames from inBuffer into outBuffer, writing the
// number of frames actually produced into outSampleCount. When the in/out
// rate ratio is exactly 1 the data is copied straight through; otherwise the
// input is wrapped in an IMFSample, pushed through the resampler transform,
// and the contiguous output is copied into outBuffer. outBuffer must be
// large enough for ceil(inSampleCount * ratio) + 1 frames.
void Convert( char* outBuffer, const char* inBuffer, unsigned int inSampleCount, unsigned int& outSampleCount )
unsigned int inputBufferSize = _bytesPerSample * _channelCount * inSampleCount;
if ( _sampleRatio == 1 )
// no sample rate conversion required
memcpy( outBuffer, inBuffer, inputBufferSize );
outSampleCount = inSampleCount;

// worst-case output size: input scaled by the ratio, rounded up, plus one
// extra frame of slack
unsigned int outputBufferSize = ( unsigned int ) ceilf( inputBufferSize * _sampleRatio ) + ( _bytesPerSample * _channelCount );

IMFMediaBuffer* rInBuffer;
IMFSample* rInSample;
BYTE* rInByteBuffer = NULL;

// 5. Create Sample object from input data
MFCreateMemoryBuffer( inputBufferSize, &rInBuffer );

// copy the caller's data into the media buffer under its lock
rInBuffer->Lock( &rInByteBuffer, NULL, NULL );
memcpy( rInByteBuffer, inBuffer, inputBufferSize );
rInBuffer->Unlock();
rInByteBuffer = NULL;

rInBuffer->SetCurrentLength( inputBufferSize );

MFCreateSample( &rInSample );
rInSample->AddBuffer( rInBuffer );

// 6. Pass input data to Resampler
_transform->ProcessInput( 0, rInSample, 0 );

// the sample holds its own reference to the buffer; drop ours
SAFE_RELEASE( rInBuffer );
SAFE_RELEASE( rInSample );

// 7. Perform sample rate conversion
IMFMediaBuffer* rOutBuffer = NULL;
BYTE* rOutByteBuffer = NULL;

MFT_OUTPUT_DATA_BUFFER rOutDataBuffer;

DWORD rBytes = outputBufferSize; // maximum bytes accepted per ProcessOutput

// 7.1 Create Sample object for output data
memset( &rOutDataBuffer, 0, sizeof rOutDataBuffer );
MFCreateSample( &( rOutDataBuffer.pSample ) );
MFCreateMemoryBuffer( rBytes, &rOutBuffer );
rOutDataBuffer.pSample->AddBuffer( rOutBuffer );
rOutDataBuffer.dwStreamID = 0;
rOutDataBuffer.dwStatus = 0;
rOutDataBuffer.pEvents = NULL;

// 7.2 Get output data from Resampler; NEED_MORE_INPUT means the MFT has
// buffered everything and produced nothing yet — release and bail out
// (outSampleCount handling for this path is on lines not shown in this view)
if ( _transform->ProcessOutput( 0, 1, &rOutDataBuffer, &rStatus ) == MF_E_TRANSFORM_NEED_MORE_INPUT )
SAFE_RELEASE( rOutBuffer );
SAFE_RELEASE( rOutDataBuffer.pSample );

// 7.3 Write output data to outBuffer
SAFE_RELEASE( rOutBuffer );
rOutDataBuffer.pSample->ConvertToContiguousBuffer( &rOutBuffer );
rOutBuffer->GetCurrentLength( &rBytes ); // rBytes now holds the actual output size

rOutBuffer->Lock( &rOutByteBuffer, NULL, NULL );
memcpy( outBuffer, rOutByteBuffer, rBytes );
rOutBuffer->Unlock();
rOutByteBuffer = NULL;

// convert the produced byte count back into frames for the caller
outSampleCount = rBytes / _bytesPerSample / _channelCount;
SAFE_RELEASE( rOutBuffer );
SAFE_RELEASE( rOutDataBuffer.pSample );
unsigned int _bytesPerSample; // size of one sample in bytes (bitsPerSample / 8)
unsigned int _channelCount;   // interleaved channels per frame
// (_sampleRatio, set in the constructor, is declared on a line not shown in this view)
IUnknown* _transformUnk;            // raw COM instance of the resampler MFT
IMFTransform* _transform;           // transform interface used for Process{Input,Output}
IWMResamplerProps* _resamplerProps; // resampler quality control (half filter length)
IMFMediaType* _mediaType;           // template media type shared by input and output
IMFMediaType* _inputMediaType;      // input format, at inSampleRate
IMFMediaType* _outputMediaType;     // output format, at outSampleRate
4057 //-----------------------------------------------------------------------------
4059 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI state (the struct declaration line is not shown in this
// view). All pointers/handles are NULL until probeDeviceOpen / the stream
// thread fills them in.
IAudioClient* captureAudioClient;   // audio client for the capture endpoint (NULL if output-only)
IAudioClient* renderAudioClient;    // audio client for the render endpoint (NULL if input-only)
IAudioCaptureClient* captureClient; // capture-side buffer service
IAudioRenderClient* renderClient;   // render-side buffer service
HANDLE captureEvent;                // event handle for capture notifications
// (the renderEvent member declaration falls on a line not shown in this view)
// default constructor: start with every interface pointer and handle NULL
: captureAudioClient( NULL ),
  renderAudioClient( NULL ),
  captureClient( NULL ),
  renderClient( NULL ),
  captureEvent( NULL ),
  renderEvent( NULL ) {}
4078 //=============================================================================
// Constructor: initialize COM for the calling thread and create the
// MMDevice enumerator used by every subsequent device query. On enumerator
// failure, errorText_ is set and a DRIVER_ERROR is raised.
RtApiWasapi::RtApiWasapi()
  : coInitialized_( false ), deviceEnumerator_( NULL )
// WASAPI can run either apartment or multi-threaded
HRESULT hr = CoInitialize( NULL );
if ( !FAILED( hr ) )
  coInitialized_ = true; // remembered so the destructor can balance the call

// Instantiate device enumerator
hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
                       CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
                       ( void** ) &deviceEnumerator_ );

if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
  error( RtAudioError::DRIVER_ERROR );
4099 //-----------------------------------------------------------------------------
// Destructor: close any open stream, release the device enumerator, and
// balance the constructor's CoInitialize (the bodies of the two guarded
// statements fall on lines not shown in this view).
RtApiWasapi::~RtApiWasapi()
if ( stream_.state != STREAM_CLOSED ) // a stream is still open — close it first

SAFE_RELEASE( deviceEnumerator_ );

// If this object previously called CoInitialize()
if ( coInitialized_ )
4113 //=============================================================================
// Return the total number of active WASAPI endpoints: capture count plus
// render count. Each failure path below sets errorText_ and transfers to the
// shared cleanup (the transfer statements and cleanup label fall on lines
// not shown in this view); on any failure the function raises DRIVER_ERROR
// and the count is not returned.
unsigned int RtApiWasapi::getDeviceCount( void )
unsigned int captureDeviceCount = 0;
unsigned int renderDeviceCount = 0;

IMMDeviceCollection* captureDevices = NULL;
IMMDeviceCollection* renderDevices = NULL;

// Count capture devices
HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";

hr = captureDevices->GetCount( &captureDeviceCount );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";

// Count render devices
hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";

hr = renderDevices->GetCount( &renderDeviceCount );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";

// release all references
SAFE_RELEASE( captureDevices );
SAFE_RELEASE( renderDevices );

// success iff no failure path above set an error message
if ( errorText_.empty() )
  return captureDeviceCount + renderDeviceCount;

error( RtAudioError::DRIVER_ERROR );
4162 //-----------------------------------------------------------------------------
// Probe a single device and fill in an RtAudio::DeviceInfo for it. The
// combined device index space lists render devices first, then capture
// devices (device >= renderDeviceCount selects a capture device). Each
// failure path sets errorText_ and transfers to the shared cleanup at the
// bottom (the transfer statements, closing braces, and the final
// error/return fall on lines not shown in this view).
RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
RtAudio::DeviceInfo info;
unsigned int captureDeviceCount = 0;
unsigned int renderDeviceCount = 0;
std::string defaultDeviceName;
bool isCaptureDevice = false;

PROPVARIANT deviceNameProp;
PROPVARIANT defaultDeviceNameProp;

IMMDeviceCollection* captureDevices = NULL;
IMMDeviceCollection* renderDevices = NULL;
IMMDevice* devicePtr = NULL;
IMMDevice* defaultDevicePtr = NULL;
IAudioClient* audioClient = NULL;
IPropertyStore* devicePropStore = NULL;
IPropertyStore* defaultDevicePropStore = NULL;

WAVEFORMATEX* deviceFormat = NULL;
WAVEFORMATEX* closestMatchFormat = NULL;

// pessimistic default: only flipped to true once probing fully succeeds
// (on a line not shown in this view)
info.probed = false;

// Count capture devices
RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";

hr = captureDevices->GetCount( &captureDeviceCount );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";

// Count render devices
hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";

hr = renderDevices->GetCount( &renderDeviceCount );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";

// validate device index
if ( device >= captureDeviceCount + renderDeviceCount ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
  errorType = RtAudioError::INVALID_USE;

// determine whether index falls within capture or render devices
if ( device >= renderDeviceCount ) {
  hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";

  isCaptureDevice = true;

  hr = renderDevices->Item( device, &devicePtr );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";

  isCaptureDevice = false;

// get default device name; the default flags below are computed by
// comparing friendly names against this device's name
if ( isCaptureDevice ) {
  hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";

  hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";

hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";

PropVariantInit( &defaultDeviceNameProp );

hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";

defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);

// get this device's friendly name
hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";

PropVariantInit( &deviceNameProp );

hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";

info.name = convertCharPointerToStdString(deviceNameProp.pwszVal);

// default-device flags: a device is the default iff its name matches the
// default endpoint's name for the matching direction
if ( isCaptureDevice ) {
  info.isDefaultInput = info.name == defaultDeviceName;
  info.isDefaultOutput = false;

  info.isDefaultInput = false;
  info.isDefaultOutput = info.name == defaultDeviceName;

// channel counts come from the device's shared-mode mix format
hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";

hr = audioClient->GetMixFormat( &deviceFormat );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";

if ( isCaptureDevice ) {
  info.inputChannels = deviceFormat->nChannels;
  info.outputChannels = 0;
  info.duplexChannels = 0;

  info.inputChannels = 0;
  info.outputChannels = deviceFormat->nChannels;
  info.duplexChannels = 0;

info.sampleRates.clear();

// allow support for all sample rates as we have a built-in sample rate converter
for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
  info.sampleRates.push_back( SAMPLE_RATES[i] );

info.preferredSampleRate = deviceFormat->nSamplesPerSec;

// native format: derive the RtAudio format flag from the mix format's tag
// (or WAVE_FORMAT_EXTENSIBLE SubFormat) and bit depth
info.nativeFormats = 0;

if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
     ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
       ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )

  if ( deviceFormat->wBitsPerSample == 32 ) {
    info.nativeFormats |= RTAUDIO_FLOAT32;

  else if ( deviceFormat->wBitsPerSample == 64 ) {
    info.nativeFormats |= RTAUDIO_FLOAT64;

else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
          ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
            ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )

  if ( deviceFormat->wBitsPerSample == 8 ) {
    info.nativeFormats |= RTAUDIO_SINT8;

  else if ( deviceFormat->wBitsPerSample == 16 ) {
    info.nativeFormats |= RTAUDIO_SINT16;

  else if ( deviceFormat->wBitsPerSample == 24 ) {
    info.nativeFormats |= RTAUDIO_SINT24;

  else if ( deviceFormat->wBitsPerSample == 32 ) {
    info.nativeFormats |= RTAUDIO_SINT32;

// release all references
PropVariantClear( &deviceNameProp );
PropVariantClear( &defaultDeviceNameProp );

SAFE_RELEASE( captureDevices );
SAFE_RELEASE( renderDevices );
SAFE_RELEASE( devicePtr );
SAFE_RELEASE( defaultDevicePtr );
SAFE_RELEASE( audioClient );
SAFE_RELEASE( devicePropStore );
SAFE_RELEASE( defaultDevicePropStore );

CoTaskMemFree( deviceFormat );
CoTaskMemFree( closestMatchFormat );

// raise the accumulated error, if any, before returning info
if ( !errorText_.empty() )
4389 //-----------------------------------------------------------------------------
// Return the index of the first device whose info reports isDefaultOutput
// (the matching return and the fallback return fall on lines not shown in
// this view). Note: probes every device, so this is O(n) device queries.
unsigned int RtApiWasapi::getDefaultOutputDevice( void )
for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
  if ( getDeviceInfo( i ).isDefaultOutput ) {
4402 //-----------------------------------------------------------------------------
// Return the index of the first device whose info reports isDefaultInput
// (the matching return and the fallback return fall on lines not shown in
// this view). Note: probes every device, so this is O(n) device queries.
unsigned int RtApiWasapi::getDefaultInputDevice( void )
for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
  if ( getDeviceInfo( i ).isDefaultInput ) {
4415 //-----------------------------------------------------------------------------
// Close the open stream: stop it if still running, release all WASAPI COM
// objects and event handles held in the WasapiHandle, free the stream
// buffers, and mark the stream CLOSED. Calling with no open stream only
// raises a WARNING.
void RtApiWasapi::closeStream( void )
if ( stream_.state == STREAM_CLOSED ) {
  errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
  error( RtAudioError::WARNING );

// stop first if needed (the stop call falls on a line not shown in this view)
if ( stream_.state != STREAM_STOPPED )

// clean up stream memory
SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )

SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )

// event handles are Win32 handles, not COM objects — close, don't release
if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
  CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );

if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
  CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );

delete ( WasapiHandle* ) stream_.apiHandle;
stream_.apiHandle = NULL;

// free the per-direction user buffers (index 0/1 = output/input)
for ( int i = 0; i < 2; i++ ) {
  if ( stream_.userBuffer[i] ) {
    free( stream_.userBuffer[i] );
    stream_.userBuffer[i] = 0;

if ( stream_.deviceBuffer ) {
  free( stream_.deviceBuffer );
  stream_.deviceBuffer = 0;

// update stream state
stream_.state = STREAM_CLOSED;
4460 //-----------------------------------------------------------------------------
// Start the stream by spawning the WASAPI processing thread. The state is
// set to RUNNING before the thread is created so the thread's loop condition
// sees a running stream; the thread is created suspended so its priority can
// be set before it first runs.
void RtApiWasapi::startStream( void )
if ( stream_.state == STREAM_RUNNING ) {
  errorText_ = "RtApiWasapi::startStream: The stream is already running.";
  error( RtAudioError::WARNING );

// update stream state
stream_.state = STREAM_RUNNING;

// create WASAPI stream thread
stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );

if ( !stream_.callbackInfo.thread ) {
  errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
  error( RtAudioError::THREAD_ERROR );

// apply the priority chosen in probeDeviceOpen, then let the thread run
SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
ResumeThread( ( void* ) stream_.callbackInfo.thread );
4488 //-----------------------------------------------------------------------------
// Gracefully stop the stream: signal the processing thread via the
// STREAM_STOPPING state, wait for it to report STREAM_STOPPED, let the last
// buffer play out, then stop both audio clients and close the thread handle.
void RtApiWasapi::stopStream( void )
if ( stream_.state == STREAM_STOPPED ) {
  errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
  error( RtAudioError::WARNING );

// inform stream thread by setting stream state to STREAM_STOPPING
stream_.state = STREAM_STOPPING;

// wait until stream thread is stopped
// NOTE(review): busy-wait on stream_.state; the loop body falls on a line
// not shown in this view
while( stream_.state != STREAM_STOPPED ) {

// Wait for the last buffer to play before stopping.
Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );

// stop capture client if applicable
if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
  HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
    error( RtAudioError::DRIVER_ERROR );

// stop render client if applicable
if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
  HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
    error( RtAudioError::DRIVER_ERROR );

// close thread handle
if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
  errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
  error( RtAudioError::THREAD_ERROR );

stream_.callbackInfo.thread = (ThreadHandle) NULL;
4541 //-----------------------------------------------------------------------------
// Abort the stream: identical to stopStream() except the final buffer is NOT
// allowed to play out (no Sleep before stopping the clients).
void RtApiWasapi::abortStream( void )
if ( stream_.state == STREAM_STOPPED ) {
  errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
  error( RtAudioError::WARNING );

// inform stream thread by setting stream state to STREAM_STOPPING
stream_.state = STREAM_STOPPING;

// wait until stream thread is stopped
// NOTE(review): busy-wait on stream_.state; the loop body falls on a line
// not shown in this view
while ( stream_.state != STREAM_STOPPED ) {

// stop capture client if applicable
if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
  HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
    error( RtAudioError::DRIVER_ERROR );

// stop render client if applicable
if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
  HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
    error( RtAudioError::DRIVER_ERROR );

// close thread handle
if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
  errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
  error( RtAudioError::THREAD_ERROR );

stream_.callbackInfo.thread = (ThreadHandle) NULL;
4591 //-----------------------------------------------------------------------------
// Open one direction (INPUT or OUTPUT) of a WASAPI stream on the given
// device: validate the index, activate the endpoint's IAudioClient into the
// WasapiHandle, record the device's channel count and latency, then fill in
// the generic stream_ bookkeeping (formats, interleaving, conversion flags,
// user buffer). Returns SUCCESS/FAILURE; on failure the stream is closed
// (the close call falls on a line not shown in this view). Failure paths set
// errorText_ and transfer to the shared cleanup at the bottom.
bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int* bufferSize,
                                   RtAudio::StreamOptions* options )
bool methodResult = FAILURE;
unsigned int captureDeviceCount = 0;
unsigned int renderDeviceCount = 0;

IMMDeviceCollection* captureDevices = NULL;
IMMDeviceCollection* renderDevices = NULL;
IMMDevice* devicePtr = NULL;
WAVEFORMATEX* deviceFormat = NULL;
unsigned int bufferBytes;
stream_.state = STREAM_STOPPED;

// create API Handle if not already created
if ( !stream_.apiHandle )
  stream_.apiHandle = ( void* ) new WasapiHandle();

// Count capture devices
RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";

hr = captureDevices->GetCount( &captureDeviceCount );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";

// Count render devices
hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";

hr = renderDevices->GetCount( &renderDeviceCount );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";

// validate device index
if ( device >= captureDeviceCount + renderDeviceCount ) {
  errorType = RtAudioError::INVALID_USE;
  errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";

// determine whether index falls within capture or render devices
// (render devices occupy indices [0, renderDeviceCount))
if ( device >= renderDeviceCount ) {
  if ( mode != INPUT ) {
    errorType = RtAudioError::INVALID_USE;
    errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";

  // retrieve captureAudioClient from devicePtr
  IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;

  hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";

  hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                            NULL, ( void** ) &captureAudioClient );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";

  hr = captureAudioClient->GetMixFormat( &deviceFormat );
  if ( FAILED( hr ) ) {
    errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";

  stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
  captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );

if ( mode != OUTPUT ) {
  errorType = RtAudioError::INVALID_USE;
  errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";

// retrieve renderAudioClient from devicePtr
IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;

hr = renderDevices->Item( device, &devicePtr );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";

hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
                          NULL, ( void** ) &renderAudioClient );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";

hr = renderAudioClient->GetMixFormat( &deviceFormat );
if ( FAILED( hr ) ) {
  errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";

stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );

// opening the second direction of an already-open stream makes it DUPLEX
if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
     ( stream_.mode == INPUT && mode == OUTPUT ) ) {
  stream_.mode = DUPLEX;

  stream_.mode = mode;

// fill in generic stream bookkeeping for this direction
stream_.device[mode] = device;
stream_.doByteSwap[mode] = false;
stream_.sampleRate = sampleRate;
stream_.bufferSize = *bufferSize;
stream_.nBuffers = 1;
stream_.nUserChannels[mode] = channels;
stream_.channelOffset[mode] = firstChannel;
stream_.userFormat = format;
stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;

if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
  stream_.userInterleaved = false;

  stream_.userInterleaved = true;
stream_.deviceInterleaved[mode] = true;

// Set flags for buffer conversion.
stream_.doConvertBuffer[mode] = false;
if ( stream_.userFormat != stream_.deviceFormat[mode] ||
     stream_.nUserChannels[0] != stream_.nDeviceChannels[0] ||
     stream_.nUserChannels[1] != stream_.nDeviceChannels[1] )
  stream_.doConvertBuffer[mode] = true;
else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
          stream_.nUserChannels[mode] > 1 )
  stream_.doConvertBuffer[mode] = true;

if ( stream_.doConvertBuffer[mode] )
  setConvertInfo( mode, 0 );

// Allocate necessary internal buffers
bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );

stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
if ( !stream_.userBuffer[mode] ) {
  errorType = RtAudioError::MEMORY_ERROR;
  errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";

// thread priority for the stream thread created in startStream()
if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
  stream_.callbackInfo.priority = 15;

  stream_.callbackInfo.priority = 0;

///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode

methodResult = SUCCESS;

// shared cleanup: release enumeration references regardless of outcome
SAFE_RELEASE( captureDevices );
SAFE_RELEASE( renderDevices );
SAFE_RELEASE( devicePtr );
CoTaskMemFree( deviceFormat );

// if method failed, close the stream
if ( methodResult == FAILURE )

if ( !errorText_.empty() )

return methodResult;
4788 //=============================================================================
// Static Win32 thread entry points: each unpacks the RtApiWasapi instance
// pointer passed to CreateThread and forwards to the corresponding member
// function (the return statements fall on lines not shown in this view).
DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();

DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
( ( RtApiWasapi* ) wasapiPtr )->stopStream();

DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4814 //-----------------------------------------------------------------------------
// RtApiWasapi::wasapiThread(): the realtime servicing loop, run on its own
// thread (see runWasapiThread above).  Per iteration it: pulls captured
// frames from captureBuffer, resamples/converts them to the user format,
// invokes the user callback, converts/resamples the callback output, pushes
// it into renderBuffer, and exchanges data with the WASAPI capture/render
// endpoints.  Exits when stream_.state becomes STREAM_STOPPING.
// NOTE(review): this listing is an excerpt — closing braces, `goto Exit;`
// error exits and several statements of the original file are not visible
// here; the code below is kept byte-identical to the excerpt.
4816 void RtApiWasapi::wasapiThread()
4818 // as this is a new thread, we must CoInitialize it
4819 CoInitialize( NULL );
// Fetch the per-stream WASAPI objects set up by probeDeviceOpen; either
// side (capture/render) may be NULL depending on stream mode.
4823 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4824 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4825 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4826 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4827 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4828 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4830 WAVEFORMATEX* captureFormat = NULL;
4831 WAVEFORMATEX* renderFormat = NULL;
4832 float captureSrRatio = 0.0f;
4833 float renderSrRatio = 0.0f;
4834 WasapiBuffer captureBuffer;
4835 WasapiBuffer renderBuffer;
4836 WasapiResampler* captureResampler = NULL;
4837 WasapiResampler* renderResampler = NULL;
4839 // declare local stream variables
4840 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4841 BYTE* streamBuffer = NULL;
4842 unsigned long captureFlags = 0;
4843 unsigned int bufferFrameCount = 0;
4844 unsigned int numFramesPadding = 0;
4845 unsigned int convBufferSize = 0;
// callbackPushed starts true so the first loop iteration prepares a
// fresh callback buffer (see the `callbackPushed || convBufferSize == 0`
// test in the render-conversion section below).
4846 bool callbackPushed = true;
4847 bool callbackPulled = false;
4848 bool callbackStopped = false;
4849 int callbackResult = 0;
4851 // convBuffer is used to store converted buffers between WASAPI and the user
4852 char* convBuffer = NULL;
4853 unsigned int convBuffSize = 0;
4854 unsigned int deviceBuffSize = 0;
4857 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4859 // Attempt to assign "Pro Audio" characteristic to thread
// (MMCSS scheduling boost via avrt.dll; loaded dynamically so the code
// still runs where the DLL is unavailable.)
4860 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4862 DWORD taskIndex = 0;
4863 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4864 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4865 FreeLibrary( AvrtDll );
4868 // start capture stream if applicable
4869 if ( captureAudioClient ) {
4870 hr = captureAudioClient->GetMixFormat( &captureFormat );
4871 if ( FAILED( hr ) ) {
4872 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4876 // init captureResampler
4877 captureResampler = new WasapiResampler( stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[INPUT] == RTAUDIO_FLOAT64,
4878 formatBytes( stream_.deviceFormat[INPUT] ) * 8, stream_.nDeviceChannels[INPUT],
4879 captureFormat->nSamplesPerSec, stream_.sampleRate );
// Ratio of device mix rate to user rate; used to size buffers and to
// decide how many device frames produce one user buffer.
4881 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
4883 // initialize capture stream according to desire buffer size
4884 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
// REFERENCE_TIME is in 100-ns units, hence the 10,000,000/sec factor.
4885 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
4887 if ( !captureClient ) {
4888 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4889 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4890 desiredBufferPeriod,
4891 desiredBufferPeriod,
4894 if ( FAILED( hr ) ) {
4895 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4899 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4900 ( void** ) &captureClient );
4901 if ( FAILED( hr ) ) {
4902 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4906 // configure captureEvent to trigger on every available capture buffer
4907 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4908 if ( !captureEvent ) {
4909 errorType = RtAudioError::SYSTEM_ERROR;
4910 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4914 hr = captureAudioClient->SetEventHandle( captureEvent );
4915 if ( FAILED( hr ) ) {
4916 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
// Publish the newly created client/event back into the shared handle so
// other methods (and a later restart) can reuse them.
4920 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4921 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4924 unsigned int inBufferSize = 0;
4925 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4926 if ( FAILED( hr ) ) {
4927 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4931 // scale outBufferSize according to stream->user sample rate ratio
4932 unsigned int outBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
4933 inBufferSize *= stream_.nDeviceChannels[INPUT];
4935 // set captureBuffer size
4936 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4938 // reset the capture stream
4939 hr = captureAudioClient->Reset();
4940 if ( FAILED( hr ) ) {
4941 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4945 // start the capture stream
4946 hr = captureAudioClient->Start();
4947 if ( FAILED( hr ) ) {
4948 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4953 // start render stream if applicable
// (Mirror image of the capture setup above, with user->device rate
// direction for the resampler.)
4954 if ( renderAudioClient ) {
4955 hr = renderAudioClient->GetMixFormat( &renderFormat );
4956 if ( FAILED( hr ) ) {
4957 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4961 // init renderResampler
4962 renderResampler = new WasapiResampler( stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT32 || stream_.deviceFormat[OUTPUT] == RTAUDIO_FLOAT64,
4963 formatBytes( stream_.deviceFormat[OUTPUT] ) * 8, stream_.nDeviceChannels[OUTPUT],
4964 stream_.sampleRate, renderFormat->nSamplesPerSec );
4966 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
4968 // initialize render stream according to desire buffer size
4969 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
4970 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
4972 if ( !renderClient ) {
4973 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4974 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4975 desiredBufferPeriod,
4976 desiredBufferPeriod,
4979 if ( FAILED( hr ) ) {
4980 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4984 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4985 ( void** ) &renderClient );
4986 if ( FAILED( hr ) ) {
4987 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4991 // configure renderEvent to trigger on every available render buffer
4992 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4993 if ( !renderEvent ) {
4994 errorType = RtAudioError::SYSTEM_ERROR;
4995 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4999 hr = renderAudioClient->SetEventHandle( renderEvent );
5000 if ( FAILED( hr ) ) {
5001 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
5005 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
5006 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
5009 unsigned int outBufferSize = 0;
5010 hr = renderAudioClient->GetBufferSize( &outBufferSize );
5011 if ( FAILED( hr ) ) {
5012 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
5016 // scale inBufferSize according to user->stream sample rate ratio
5017 unsigned int inBufferSize = ( unsigned int ) ceilf( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
5018 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
5020 // set renderBuffer size
5021 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
5023 // reset the render stream
5024 hr = renderAudioClient->Reset();
5025 if ( FAILED( hr ) ) {
5026 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
5030 // start the render stream
5031 hr = renderAudioClient->Start();
5032 if ( FAILED( hr ) ) {
5033 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
5038 // malloc buffer memory
// Size conversion/device buffers for the active direction(s); DUPLEX
// takes the max of both so one buffer serves either direction.
5039 if ( stream_.mode == INPUT )
5041 using namespace std; // for ceilf
5042 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5043 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
5045 else if ( stream_.mode == OUTPUT )
5047 convBuffSize = ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5048 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
5050 else if ( stream_.mode == DUPLEX )
5052 convBuffSize = std::max( ( size_t ) ( ceilf( stream_.bufferSize * captureSrRatio ) ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5053 ( size_t ) ( ceilf( stream_.bufferSize * renderSrRatio ) ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5054 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
5055 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
5058 convBuffSize *= 2; // allow overflow for *SrRatio remainders
5059 convBuffer = ( char* ) malloc( convBuffSize );
5060 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
5061 if ( !convBuffer || !stream_.deviceBuffer ) {
5062 errorType = RtAudioError::MEMORY_ERROR;
5063 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
5067 // stream process loop
// Runs until stopStream()/abortStream() transition stream_.state to
// STREAM_STOPPING (presumably via the stop/abort helper threads above —
// confirm against the original file).
5068 while ( stream_.state != STREAM_STOPPING ) {
5069 if ( !callbackPulled ) {
5072 // 1. Pull callback buffer from inputBuffer
5073 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
5074 // Convert callback buffer to user format
5076 if ( captureAudioClient )
5078 int samplesToPull = ( unsigned int ) floorf( stream_.bufferSize * captureSrRatio );
5079 if ( captureSrRatio != 1 )
5081 // account for remainders
5086 while ( convBufferSize < stream_.bufferSize )
5088 // Pull callback buffer from inputBuffer
5089 callbackPulled = captureBuffer.pullBuffer( convBuffer,
5090 samplesToPull * stream_.nDeviceChannels[INPUT],
5091 stream_.deviceFormat[INPUT] );
5093 if ( !callbackPulled )
5098 // Convert callback buffer to user sample rate
5099 unsigned int deviceBufferOffset = convBufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.userFormat );
5100 unsigned int convSamples = 0;
5102 captureResampler->Convert( stream_.deviceBuffer + deviceBufferOffset,
5107 convBufferSize += convSamples;
5108 samplesToPull = 1; // now pull one sample at a time until we have stream_.bufferSize samples
5111 if ( callbackPulled )
5113 if ( stream_.doConvertBuffer[INPUT] ) {
5114 // Convert callback buffer to user format
5115 convertBuffer( stream_.userBuffer[INPUT],
5116 stream_.deviceBuffer,
5117 stream_.convertInfo[INPUT] );
5120 // no further conversion, simple copy deviceBuffer to userBuffer
5121 memcpy( stream_.userBuffer[INPUT],
5122 stream_.deviceBuffer,
5123 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
5128 // if there is no capture stream, set callbackPulled flag
5129 callbackPulled = true;
5134 // 1. Execute user callback method
5135 // 2. Handle return value from callback
5137 // if callback has not requested the stream to stop
5138 if ( callbackPulled && !callbackStopped ) {
5139 // Execute user callback method
5140 callbackResult = callback( stream_.userBuffer[OUTPUT],
5141 stream_.userBuffer[INPUT],
5144 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
5145 stream_.callbackInfo.userData );
5147 // Handle return value from callback
// 1 = drain and stop, 2 = abort immediately; both are executed on a
// separate thread because stopStream()/abortStream() join this one.
5148 if ( callbackResult == 1 ) {
5149 // instantiate a thread to stop this thread
5150 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
5151 if ( !threadHandle ) {
5152 errorType = RtAudioError::THREAD_ERROR;
5153 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
5156 else if ( !CloseHandle( threadHandle ) ) {
5157 errorType = RtAudioError::THREAD_ERROR;
5158 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
5162 callbackStopped = true;
5164 else if ( callbackResult == 2 ) {
5165 // instantiate a thread to stop this thread
5166 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
5167 if ( !threadHandle ) {
5168 errorType = RtAudioError::THREAD_ERROR;
5169 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
5172 else if ( !CloseHandle( threadHandle ) ) {
5173 errorType = RtAudioError::THREAD_ERROR;
5174 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
5178 callbackStopped = true;
5185 // 1. Convert callback buffer to stream format
5186 // 2. Convert callback buffer to stream sample rate and channel count
5187 // 3. Push callback buffer into outputBuffer
5189 if ( renderAudioClient && callbackPulled )
5191 // if the last call to renderBuffer.PushBuffer() was successful
5192 if ( callbackPushed || convBufferSize == 0 )
5194 if ( stream_.doConvertBuffer[OUTPUT] )
5196 // Convert callback buffer to stream format
5197 convertBuffer( stream_.deviceBuffer,
5198 stream_.userBuffer[OUTPUT],
5199 stream_.convertInfo[OUTPUT] );
5203 // Convert callback buffer to stream sample rate
5204 renderResampler->Convert( convBuffer,
5205 stream_.deviceBuffer,
5210 // Push callback buffer into outputBuffer
5211 callbackPushed = renderBuffer.pushBuffer( convBuffer,
5212 convBufferSize * stream_.nDeviceChannels[OUTPUT],
5213 stream_.deviceFormat[OUTPUT] );
5216 // if there is no render stream, set callbackPushed flag
5217 callbackPushed = true;
5222 // 1. Get capture buffer from stream
5223 // 2. Push capture buffer into inputBuffer
5224 // 3. If 2. was successful: Release capture buffer
5226 if ( captureAudioClient ) {
5227 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5228 if ( !callbackPulled ) {
5229 WaitForSingleObject( captureEvent, INFINITE );
5232 // Get capture buffer from stream
5233 hr = captureClient->GetBuffer( &streamBuffer,
5235 &captureFlags, NULL, NULL );
5236 if ( FAILED( hr ) ) {
5237 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5241 if ( bufferFrameCount != 0 ) {
5242 // Push capture buffer into inputBuffer
5243 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5244 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5245 stream_.deviceFormat[INPUT] ) )
5247 // Release capture buffer
5248 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5249 if ( FAILED( hr ) ) {
5250 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5256 // Inform WASAPI that capture was unsuccessful
5257 hr = captureClient->ReleaseBuffer( 0 );
5258 if ( FAILED( hr ) ) {
5259 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5266 // Inform WASAPI that capture was unsuccessful
5267 hr = captureClient->ReleaseBuffer( 0 );
5268 if ( FAILED( hr ) ) {
5269 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5277 // 1. Get render buffer from stream
5278 // 2. Pull next buffer from outputBuffer
5279 // 3. If 2. was successful: Fill render buffer with next buffer
5280 // Release render buffer
5282 if ( renderAudioClient ) {
5283 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5284 if ( callbackPulled && !callbackPushed ) {
5285 WaitForSingleObject( renderEvent, INFINITE );
5288 // Get render buffer from stream
5289 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5290 if ( FAILED( hr ) ) {
5291 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5295 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5296 if ( FAILED( hr ) ) {
5297 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
// Writable frames = total endpoint buffer minus frames still queued.
5301 bufferFrameCount -= numFramesPadding;
5303 if ( bufferFrameCount != 0 ) {
5304 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5305 if ( FAILED( hr ) ) {
5306 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5310 // Pull next buffer from outputBuffer
5311 // Fill render buffer with next buffer
5312 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5313 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5314 stream_.deviceFormat[OUTPUT] ) )
5316 // Release render buffer
5317 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5318 if ( FAILED( hr ) ) {
5319 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5325 // Inform WASAPI that render was unsuccessful
5326 hr = renderClient->ReleaseBuffer( 0, 0 );
5327 if ( FAILED( hr ) ) {
5328 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5335 // Inform WASAPI that render was unsuccessful
5336 hr = renderClient->ReleaseBuffer( 0, 0 );
5337 if ( FAILED( hr ) ) {
5338 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5344 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5345 if ( callbackPushed ) {
5346 // unsetting the callbackPulled flag lets the stream know that
5347 // the audio device is ready for another callback output buffer.
5348 callbackPulled = false;
// Advance the stream time by one buffer's worth of frames.
5351 RtApi::tickStreamTime();
// Exit/cleanup path: free the COM-allocated mix formats, the local
// conversion buffer and both resamplers.
5358 CoTaskMemFree( captureFormat );
5359 CoTaskMemFree( renderFormat );
5361 free ( convBuffer );
5362 delete renderResampler;
5363 delete captureResampler;
5367 // update stream state
5368 stream_.state = STREAM_STOPPED;
// Report any accumulated error text (error-raising line not visible in
// this excerpt).
5370 if ( errorText_.empty() )
5376 //******************** End of __WINDOWS_WASAPI__ *********************//
5380 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5382 // Modified by Robin Davies, October 2005
5383 // - Improvements to DirectX pointer chasing.
5384 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5385 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5386 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5387 // Changed device query structure for RtAudio 4.0.7, January 2010
5389 #include <windows.h>
5390 #include <process.h>
5391 #include <mmsystem.h>
5395 #include <algorithm>
5397 #if defined(__MINGW32__)
5398 // missing from latest mingw winapi
5399 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5400 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5401 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5402 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5405 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5407 #ifdef _MSC_VER // if Microsoft Visual C++
5408 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5411 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5413 if ( pointer > bufferSize ) pointer -= bufferSize;
5414 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5415 if ( pointer < earlierPointer ) pointer += bufferSize;
5416 return pointer >= earlierPointer && pointer < laterPointer;
5419 // A structure to hold various information related to the DirectSound
5420 // API implementation.
// NOTE(review): excerpted listing — the `struct DsHandle {` opening line
// and several members assigned in the constructor below (id[2], buffer[2],
// xrun[2]) are not visible in this view.
5422 unsigned int drainCounter; // Tracks callback counts when draining
5423 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Two-element arrays hold per-direction state — presumably [0]=playback,
// [1]=capture, matching DsDevice::validId usage; TODO confirm.
5427 UINT bufferPointer[2];
5428 DWORD dsBufferSize[2]; // DirectSound buffer size in bytes — presumably; verify against probeDeviceOpen
5429 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero the counters, ids, buffers, pointers and xrun flags.
5433 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5436 // Declarations for utility functions, callbacks, and structures
5437 // specific to the DirectSound implementation.
// Enumeration callback handed to DirectSound(Capture)Enumerate in
// getDeviceCount(); the remainder of its parameter list is not visible
// in this excerpt.
5438 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5439 LPCTSTR description,
// Maps a DirectSound error code to a human-readable string (used when
// composing errorStream_ messages below).
5443 static const char* getErrorString( int code );
// Audio servicing thread entry point — signature matches a
// _beginthreadex-style routine; presumably started in startStream (not
// visible here, TODO confirm).
5445 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice constructor initializer: device starts not-found with neither
// the output [0] nor input [1] id valid.  NOTE(review): the surrounding
// `struct DsDevice { ... }` lines are not visible in this excerpt.
5454 : found(false) { validId[0] = false; validId[1] = false; }
// Probe context passed through the enumeration's user pointer: direction
// flag plus the device list to populate.
5457 struct DsProbeData {
5459 std::vector<struct DsDevice>* dsDevices;
5462 RtApiDs :: RtApiDs()
5464 // Dsound will run both-threaded. If CoInitialize fails, then just
5465 // accept whatever the mainline chose for a threading model.
5466 coInitialized_ = false;
5467 HRESULT hr = CoInitialize( NULL );
5468 if ( !FAILED( hr ) ) coInitialized_ = true;
5471 RtApiDs :: ~RtApiDs()
5473 if ( stream_.state != STREAM_CLOSED ) closeStream();
5474 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5477 // The DirectSound default output is always the first device.
5478 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5483 // The DirectSound default input is always the first input device,
5484 // which is the first capture device enumerated.
5485 unsigned int RtApiDs :: getDefaultInputDevice( void )
// Re-enumerate DirectSound playback and capture devices, prune entries
// that have disappeared, and return the current size of dsDevices.
// Enumeration failures are reported as WARNINGs and do not abort the
// count.  NOTE(review): excerpted listing — the opening brace, the
// `else` advance of the pruning loop and closing braces are not shown;
// code kept byte-identical to the excerpt.
5490 unsigned int RtApiDs :: getDeviceCount( void )
5492 // Set query flag for previously found devices to false, so that we
5493 // can check for any devices that have disappeared.
5494 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5495 dsDevices[i].found = false;
5497 // Query DirectSound devices.
// probeInfo carries the direction flag and the target device list into
// deviceQueryCallback via the enumeration's user pointer.
5498 struct DsProbeData probeInfo;
5499 probeInfo.isInput = false;
5500 probeInfo.dsDevices = &dsDevices;
5501 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5502 if ( FAILED( result ) ) {
5503 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5504 errorText_ = errorStream_.str();
5505 error( RtAudioError::WARNING );
5508 // Query DirectSoundCapture devices.
5509 probeInfo.isInput = true;
5510 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5511 if ( FAILED( result ) ) {
5512 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5513 errorText_ = errorStream_.str();
5514 error( RtAudioError::WARNING );
5517 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note: erase() without incrementing keeps the index valid when an
// element is removed (increment happens in the excised `else` branch).
5518 for ( unsigned int i=0; i<dsDevices.size(); ) {
5519 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5523 return static_cast<unsigned int>(dsDevices.size());
// Probe one DirectSound device (by enumeration index) and fill an
// RtAudio::DeviceInfo with its channel counts, supported sample rates
// and native formats.  Output capabilities come from
// IDirectSound::GetCaps, input capabilities from
// IDirectSoundCapture::GetCaps.  Failures are reported as WARNINGs and
// return a partially filled info.  NOTE(review): excerpted listing —
// the `probeInput:` label, Release() calls, `info.probed = true;` lines
// and various braces of the original file are not visible here; code
// kept byte-identical to the excerpt.
5526 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5528 RtAudio::DeviceInfo info;
5529 info.probed = false;
// Lazily (re)build the device list before validating the index.
5531 if ( dsDevices.size() == 0 ) {
5532 // Force a query of all devices
5534 if ( dsDevices.size() == 0 ) {
5535 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5536 error( RtAudioError::INVALID_USE );
5541 if ( device >= dsDevices.size() ) {
5542 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5543 error( RtAudioError::INVALID_USE );
// validId[0] == output id; skip the output probe when absent.
5548 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5550 LPDIRECTSOUND output;
5552 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5553 if ( FAILED( result ) ) {
5554 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5555 errorText_ = errorStream_.str();
5556 error( RtAudioError::WARNING );
// DSCAPS requires dwSize to be set before the GetCaps call.
5560 outCaps.dwSize = sizeof( outCaps );
5561 result = output->GetCaps( &outCaps );
5562 if ( FAILED( result ) ) {
5564 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5565 errorText_ = errorStream_.str();
5566 error( RtAudioError::WARNING );
5570 // Get output channel information.
5571 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5573 // Get sample rate information.
5574 info.sampleRates.clear();
5575 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5576 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5577 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5578 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate that does not exceed 48 kHz.
5580 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5581 info.preferredSampleRate = SAMPLE_RATES[k];
5585 // Get format information.
5586 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5587 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5591 if ( getDefaultOutputDevice() == device )
5592 info.isDefaultOutput = true;
// No capture side for this device: finish with just the output info.
5594 if ( dsDevices[ device ].validId[1] == false ) {
5595 info.name = dsDevices[ device ].name;
// Capture probe (target of the `probeInput:` label, not visible here).
5602 LPDIRECTSOUNDCAPTURE input;
5603 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5604 if ( FAILED( result ) ) {
5605 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5606 errorText_ = errorStream_.str();
5607 error( RtAudioError::WARNING );
5612 inCaps.dwSize = sizeof( inCaps );
5613 result = input->GetCaps( &inCaps );
5614 if ( FAILED( result ) ) {
5616 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5617 errorText_ = errorStream_.str();
5618 error( RtAudioError::WARNING );
5622 // Get input channel information.
5623 info.inputChannels = inCaps.dwChannels;
5625 // Get sample rate and format information.
// DSCCAPS reports capture support as WAVE_FORMAT_* bit flags, encoding
// rate (1x/2x/4x/96 kHz), channel count (M/S) and width (08/16) per bit.
5626 std::vector<unsigned int> rates;
5627 if ( inCaps.dwChannels >= 2 ) {
5628 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5629 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5630 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5631 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5632 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5633 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5634 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5635 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5637 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5638 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5639 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5640 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5641 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5643 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5644 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5645 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5646 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5647 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5650 else if ( inCaps.dwChannels == 1 ) {
5651 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5652 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5653 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5654 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5655 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5656 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5657 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5658 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5660 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5661 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5662 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5663 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5664 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5666 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5667 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5668 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5669 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5670 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5673 else info.inputChannels = 0; // technically, this would be an error
5677 if ( info.inputChannels == 0 ) return info;
5679 // Copy the supported rates to the info structure but avoid duplication.
5681 for ( unsigned int i=0; i<rates.size(); i++ ) {
5683 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5684 if ( rates[i] == info.sampleRates[j] ) {
5689 if ( found == false ) info.sampleRates.push_back( rates[i] );
5691 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5693 // If device opens for both playback and capture, we determine the channels.
5694 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5695 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Device 0 is the default input (see getDefaultInputDevice above).
5697 if ( device == 0 ) info.isDefaultInput = true;
5699 // Copy name and return.
5700 info.name = dsDevices[ device ].name;
5705 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5706 unsigned int firstChannel, unsigned int sampleRate,
5707 RtAudioFormat format, unsigned int *bufferSize,
5708 RtAudio::StreamOptions *options )
5710 if ( channels + firstChannel > 2 ) {
5711 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5715 size_t nDevices = dsDevices.size();
5716 if ( nDevices == 0 ) {
5717 // This should not happen because a check is made before this function is called.
5718 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5722 if ( device >= nDevices ) {
5723 // This should not happen because a check is made before this function is called.
5724 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5728 if ( mode == OUTPUT ) {
5729 if ( dsDevices[ device ].validId[0] == false ) {
5730 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5731 errorText_ = errorStream_.str();
5735 else { // mode == INPUT
5736 if ( dsDevices[ device ].validId[1] == false ) {
5737 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5738 errorText_ = errorStream_.str();
5743 // According to a note in PortAudio, using GetDesktopWindow()
5744 // instead of GetForegroundWindow() is supposed to avoid problems
5745 // that occur when the application's window is not the foreground
5746 // window. Also, if the application window closes before the
5747 // DirectSound buffer, DirectSound can crash. In the past, I had
5748 // problems when using GetDesktopWindow() but it seems fine now
5749 // (January 2010). I'll leave it commented here.
5750 // HWND hWnd = GetForegroundWindow();
5751 HWND hWnd = GetDesktopWindow();
5753 // Check the numberOfBuffers parameter and limit the lowest value to
5754 // two. This is a judgement call and a value of two is probably too
5755 // low for capture, but it should work for playback.
5757 if ( options ) nBuffers = options->numberOfBuffers;
5758 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5759 if ( nBuffers < 2 ) nBuffers = 3;
5761 // Check the lower range of the user-specified buffer size and set
5762 // (arbitrarily) to a lower bound of 32.
5763 if ( *bufferSize < 32 ) *bufferSize = 32;
5765 // Create the wave format structure. The data format setting will
5766 // be determined later.
5767 WAVEFORMATEX waveFormat;
5768 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5769 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5770 waveFormat.nChannels = channels + firstChannel;
5771 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5773 // Determine the device buffer size. By default, we'll use the value
5774 // defined above (32K), but we will grow it to make allowances for
5775 // very large software buffer sizes.
5776 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5777 DWORD dsPointerLeadTime = 0;
5779 void *ohandle = 0, *bhandle = 0;
5781 if ( mode == OUTPUT ) {
5783 LPDIRECTSOUND output;
5784 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5785 if ( FAILED( result ) ) {
5786 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5787 errorText_ = errorStream_.str();
5792 outCaps.dwSize = sizeof( outCaps );
5793 result = output->GetCaps( &outCaps );
5794 if ( FAILED( result ) ) {
5796 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5797 errorText_ = errorStream_.str();
5801 // Check channel information.
5802 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5803 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5804 errorText_ = errorStream_.str();
5808 // Check format information. Use 16-bit format unless not
5809 // supported or user requests 8-bit.
5810 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5811 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5812 waveFormat.wBitsPerSample = 16;
5813 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5816 waveFormat.wBitsPerSample = 8;
5817 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5819 stream_.userFormat = format;
5821 // Update wave format structure and buffer information.
5822 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5823 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5824 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5826 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5827 while ( dsPointerLeadTime * 2U > dsBufferSize )
5830 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5831 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5832 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5833 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5834 if ( FAILED( result ) ) {
5836 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5837 errorText_ = errorStream_.str();
5841 // Even though we will write to the secondary buffer, we need to
5842 // access the primary buffer to set the correct output format
5843 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5844 // buffer description.
5845 DSBUFFERDESC bufferDescription;
5846 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5847 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5848 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5850 // Obtain the primary buffer
5851 LPDIRECTSOUNDBUFFER buffer;
5852 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5853 if ( FAILED( result ) ) {
5855 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5856 errorText_ = errorStream_.str();
5860 // Set the primary DS buffer sound format.
5861 result = buffer->SetFormat( &waveFormat );
5862 if ( FAILED( result ) ) {
5864 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5865 errorText_ = errorStream_.str();
5869 // Setup the secondary DS buffer description.
5870 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5871 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5872 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5873 DSBCAPS_GLOBALFOCUS |
5874 DSBCAPS_GETCURRENTPOSITION2 |
5875 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5876 bufferDescription.dwBufferBytes = dsBufferSize;
5877 bufferDescription.lpwfxFormat = &waveFormat;
5879 // Try to create the secondary DS buffer. If that doesn't work,
5880 // try to use software mixing. Otherwise, there's a problem.
5881 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5882 if ( FAILED( result ) ) {
5883 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5884 DSBCAPS_GLOBALFOCUS |
5885 DSBCAPS_GETCURRENTPOSITION2 |
5886 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5887 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5888 if ( FAILED( result ) ) {
5890 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5891 errorText_ = errorStream_.str();
5896 // Get the buffer size ... might be different from what we specified.
5898 dsbcaps.dwSize = sizeof( DSBCAPS );
5899 result = buffer->GetCaps( &dsbcaps );
5900 if ( FAILED( result ) ) {
5903 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5904 errorText_ = errorStream_.str();
5908 dsBufferSize = dsbcaps.dwBufferBytes;
5910 // Lock the DS buffer
5913 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5914 if ( FAILED( result ) ) {
5917 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5918 errorText_ = errorStream_.str();
5922 // Zero the DS buffer
5923 ZeroMemory( audioPtr, dataLen );
5925 // Unlock the DS buffer
5926 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5927 if ( FAILED( result ) ) {
5930 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5931 errorText_ = errorStream_.str();
5935 ohandle = (void *) output;
5936 bhandle = (void *) buffer;
5939 if ( mode == INPUT ) {
5941 LPDIRECTSOUNDCAPTURE input;
5942 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5943 if ( FAILED( result ) ) {
5944 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5945 errorText_ = errorStream_.str();
5950 inCaps.dwSize = sizeof( inCaps );
5951 result = input->GetCaps( &inCaps );
5952 if ( FAILED( result ) ) {
5954 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5955 errorText_ = errorStream_.str();
5959 // Check channel information.
5960 if ( inCaps.dwChannels < channels + firstChannel ) {
5961 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5965 // Check format information. Use 16-bit format unless user
5967 DWORD deviceFormats;
5968 if ( channels + firstChannel == 2 ) {
5969 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5970 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5971 waveFormat.wBitsPerSample = 8;
5972 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5974 else { // assume 16-bit is supported
5975 waveFormat.wBitsPerSample = 16;
5976 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5979 else { // channel == 1
5980 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5981 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5982 waveFormat.wBitsPerSample = 8;
5983 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5985 else { // assume 16-bit is supported
5986 waveFormat.wBitsPerSample = 16;
5987 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5990 stream_.userFormat = format;
5992 // Update wave format structure and buffer information.
5993 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5994 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5995 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5997 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5998 while ( dsPointerLeadTime * 2U > dsBufferSize )
6001 // Setup the secondary DS buffer description.
6002 DSCBUFFERDESC bufferDescription;
6003 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
6004 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
6005 bufferDescription.dwFlags = 0;
6006 bufferDescription.dwReserved = 0;
6007 bufferDescription.dwBufferBytes = dsBufferSize;
6008 bufferDescription.lpwfxFormat = &waveFormat;
6010 // Create the capture buffer.
6011 LPDIRECTSOUNDCAPTUREBUFFER buffer;
6012 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
6013 if ( FAILED( result ) ) {
6015 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
6016 errorText_ = errorStream_.str();
6020 // Get the buffer size ... might be different from what we specified.
6022 dscbcaps.dwSize = sizeof( DSCBCAPS );
6023 result = buffer->GetCaps( &dscbcaps );
6024 if ( FAILED( result ) ) {
6027 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
6028 errorText_ = errorStream_.str();
6032 dsBufferSize = dscbcaps.dwBufferBytes;
6034 // NOTE: We could have a problem here if this is a duplex stream
6035 // and the play and capture hardware buffer sizes are different
6036 // (I'm actually not sure if that is a problem or not).
6037 // Currently, we are not verifying that.
6039 // Lock the capture buffer
6042 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
6043 if ( FAILED( result ) ) {
6046 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
6047 errorText_ = errorStream_.str();
6052 ZeroMemory( audioPtr, dataLen );
6054 // Unlock the buffer
6055 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6056 if ( FAILED( result ) ) {
6059 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
6060 errorText_ = errorStream_.str();
6064 ohandle = (void *) input;
6065 bhandle = (void *) buffer;
6068 // Set various stream parameters
6069 DsHandle *handle = 0;
6070 stream_.nDeviceChannels[mode] = channels + firstChannel;
6071 stream_.nUserChannels[mode] = channels;
6072 stream_.bufferSize = *bufferSize;
6073 stream_.channelOffset[mode] = firstChannel;
6074 stream_.deviceInterleaved[mode] = true;
6075 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
6076 else stream_.userInterleaved = true;
6078 // Set flag for buffer conversion
6079 stream_.doConvertBuffer[mode] = false;
6080 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
6081 stream_.doConvertBuffer[mode] = true;
6082 if (stream_.userFormat != stream_.deviceFormat[mode])
6083 stream_.doConvertBuffer[mode] = true;
6084 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
6085 stream_.nUserChannels[mode] > 1 )
6086 stream_.doConvertBuffer[mode] = true;
6088 // Allocate necessary internal buffers
6089 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
6090 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
6091 if ( stream_.userBuffer[mode] == NULL ) {
6092 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
6096 if ( stream_.doConvertBuffer[mode] ) {
6098 bool makeBuffer = true;
6099 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
6100 if ( mode == INPUT ) {
6101 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
6102 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
6103 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
6108 bufferBytes *= *bufferSize;
6109 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
6110 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
6111 if ( stream_.deviceBuffer == NULL ) {
6112 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
6118 // Allocate our DsHandle structures for the stream.
6119 if ( stream_.apiHandle == 0 ) {
6121 handle = new DsHandle;
6123 catch ( std::bad_alloc& ) {
6124 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
6128 // Create a manual-reset event.
6129 handle->condition = CreateEvent( NULL, // no security
6130 TRUE, // manual-reset
6131 FALSE, // non-signaled initially
6133 stream_.apiHandle = (void *) handle;
6136 handle = (DsHandle *) stream_.apiHandle;
6137 handle->id[mode] = ohandle;
6138 handle->buffer[mode] = bhandle;
6139 handle->dsBufferSize[mode] = dsBufferSize;
6140 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
6142 stream_.device[mode] = device;
6143 stream_.state = STREAM_STOPPED;
6144 if ( stream_.mode == OUTPUT && mode == INPUT )
6145 // We had already set up an output stream.
6146 stream_.mode = DUPLEX;
6148 stream_.mode = mode;
6149 stream_.nBuffers = nBuffers;
6150 stream_.sampleRate = sampleRate;
6152 // Setup the buffer conversion information structure.
6153 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
6155 // Setup the callback thread.
6156 if ( stream_.callbackInfo.isRunning == false ) {
6158 stream_.callbackInfo.isRunning = true;
6159 stream_.callbackInfo.object = (void *) this;
6160 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
6161 &stream_.callbackInfo, 0, &threadId );
6162 if ( stream_.callbackInfo.thread == 0 ) {
6163 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
6167 // Boost DS thread priority
6168 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
6174 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6175 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6176 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6177 if ( buffer ) buffer->Release();
6180 if ( handle->buffer[1] ) {
6181 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6182 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6183 if ( buffer ) buffer->Release();
6186 CloseHandle( handle->condition );
6188 stream_.apiHandle = 0;
6191 for ( int i=0; i<2; i++ ) {
6192 if ( stream_.userBuffer[i] ) {
6193 free( stream_.userBuffer[i] );
6194 stream_.userBuffer[i] = 0;
6198 if ( stream_.deviceBuffer ) {
6199 free( stream_.deviceBuffer );
6200 stream_.deviceBuffer = 0;
6203 stream_.state = STREAM_CLOSED;
// Close an open DirectSound stream: stop the callback thread, release the
// playback/capture DirectSound objects, free the internal user/device
// buffers, and mark the stream CLOSED.
// NOTE(review): this chunk appears truncated -- braces, early returns and
// the Stop()/Release() calls on the DS objects between the casts below are
// not visible here; verify against the full source before editing.
6207 void RtApiDs :: closeStream()
6209 if ( stream_.state == STREAM_CLOSED ) {
6210 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
// Warn (don't throw) and presumably return early -- closing a closed stream is benign.
6211 error( RtAudioError::WARNING );
6215 // Stop the callback thread.
// Clearing isRunning makes the callback loop exit; wait for the thread to
// finish, then release its handle.
6216 stream_.callbackInfo.isRunning = false;
6217 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6218 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6220 DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Tear down the output (playback) DirectSound objects, if present.
6222 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6223 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6224 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Tear down the input (capture) DirectSound objects, if present.
6231 if ( handle->buffer[1] ) {
6232 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6233 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Destroy the manual-reset event created in probeDeviceOpen (used to
// signal drain completion), then drop the per-stream handle struct.
6240 CloseHandle( handle->condition );
6242 stream_.apiHandle = 0;
// Free the per-mode user buffers (index 0 = output, 1 = input).
6245 for ( int i=0; i<2; i++ ) {
6246 if ( stream_.userBuffer[i] ) {
6247 free( stream_.userBuffer[i] );
6248 stream_.userBuffer[i] = 0;
// Free the shared device (format-conversion) buffer.
6252 if ( stream_.deviceBuffer ) {
6253 free( stream_.deviceBuffer );
6254 stream_.deviceBuffer = 0;
// Reset bookkeeping so this RtApiDs instance can open a new stream.
6257 stream_.mode = UNINITIALIZED;
6258 stream_.state = STREAM_CLOSED;
// Start a stopped stream: begin looping playback on the output DS buffer
// and/or capture on the input DS buffer, reset the drain bookkeeping, and
// flip the stream state to RUNNING.
// NOTE(review): declarations for `result` and some braces/returns are
// elided from this chunk; confirm against the full source.
6261 void RtApiDs :: startStream()
6264 if ( stream_.state == STREAM_RUNNING ) {
6265 errorText_ = "RtApiDs::startStream(): the stream is already running!";
// Warn only -- starting a running stream is not fatal.
6266 error( RtAudioError::WARNING );
6270 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6272 // Increase scheduler frequency on lesser windows (a side-effect of
6273 // increasing timer accuracy). On greater windows (Win2K or later),
6274 // this is already in effect.
// Matched by timeEndPeriod( 1 ) in stopStream().
6275 timeBeginPeriod( 1 );
// buffersRolling is re-armed so callbackEvent() re-syncs the read/write
// pointers on the first callback after (re)start.
6277 buffersRolling = false;
6278 duplexPrerollBytes = 0;
6280 if ( stream_.mode == DUPLEX ) {
6281 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6282 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off looping playback on the output buffer (OUTPUT or DUPLEX).
6286 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6288 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6289 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6290 if ( FAILED( result ) ) {
6291 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6292 errorText_ = errorStream_.str();
// Kick off looping capture on the input buffer (INPUT or DUPLEX).
6297 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6299 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6300 result = buffer->Start( DSCBSTART_LOOPING );
6301 if ( FAILED( result ) ) {
6302 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6303 errorText_ = errorStream_.str();
// Clear drain state and the drain-complete event before going RUNNING.
6308 handle->drainCounter = 0;
6309 handle->internalDrain = false;
6310 ResetEvent( handle->condition );
6311 stream_.state = STREAM_RUNNING;
// Report any accumulated DirectSound failure as a system error.
6314 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream, draining queued output first: trigger the drain in
// the callback thread and block until it signals completion, then Stop()
// each DS buffer and zero its contents so a restart plays/records silence
// rather than stale data.
// NOTE(review): declarations for `result`, `audioPtr` and `dataLen`, plus
// some braces/returns, are elided from this chunk; confirm against the
// full source.
6317 void RtApiDs :: stopStream()
6320 if ( stream_.state == STREAM_STOPPED ) {
6321 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
// Warn only -- stopping a stopped stream is benign.
6322 error( RtAudioError::WARNING );
6329 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6330 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Ask the callback thread to drain (drainCounter >= 2 makes it write
// zeros), then block here until it sets handle->condition.
6331 if ( handle->drainCounter == 0 ) {
6332 handle->drainCounter = 2;
6333 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6336 stream_.state = STREAM_STOPPED;
// Serialize against the callback thread before touching the DS buffer.
6338 MUTEX_LOCK( &stream_.mutex );
6340 // Stop the buffer and clear memory
6341 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6342 result = buffer->Stop();
6343 if ( FAILED( result ) ) {
6344 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6345 errorText_ = errorStream_.str();
6349 // Lock the buffer and clear it so that if we start to play again,
6350 // we won't have old data playing.
6351 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6352 if ( FAILED( result ) ) {
6353 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6354 errorText_ = errorStream_.str();
6358 // Zero the DS buffer
6359 ZeroMemory( audioPtr, dataLen );
6361 // Unlock the DS buffer
6362 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6363 if ( FAILED( result ) ) {
6364 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6365 errorText_ = errorStream_.str();
6369 // If we start playing again, we must begin at beginning of buffer.
6370 handle->bufferPointer[0] = 0;
// Input side: no drain is needed for capture; just stop and zero.
6373 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6374 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6378 stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for a pure INPUT stream.
6380 if ( stream_.mode != DUPLEX )
6381 MUTEX_LOCK( &stream_.mutex );
6383 result = buffer->Stop();
6384 if ( FAILED( result ) ) {
6385 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6386 errorText_ = errorStream_.str();
6390 // Lock the buffer and clear it so that if we start to play again,
6391 // we won't have old data playing.
6392 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6393 if ( FAILED( result ) ) {
6394 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6395 errorText_ = errorStream_.str();
6399 // Zero the DS buffer
6400 ZeroMemory( audioPtr, dataLen );
6402 // Unlock the DS buffer
6403 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6404 if ( FAILED( result ) ) {
6405 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6406 errorText_ = errorStream_.str();
6410 // If we start recording again, we must begin at beginning of buffer.
6411 handle->bufferPointer[1] = 0;
6415 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6416 MUTEX_UNLOCK( &stream_.mutex );
// Report any accumulated DirectSound failure as a system error.
6418 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream without draining queued output: setting
// drainCounter to 2 makes callbackEvent() substitute zeros for user data,
// so playback goes silent immediately before the stream is stopped.
// NOTE(review): the tail of this function (presumably the stopStream()
// call) is elided from this chunk; confirm against the full source.
6421 void RtApiDs :: abortStream()
6424 if ( stream_.state == STREAM_STOPPED ) {
6425 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
// Warn only -- aborting a stopped stream is benign.
6426 error( RtAudioError::WARNING );
6430 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6431 handle->drainCounter = 2;
6436 void RtApiDs :: callbackEvent()
6438 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6439 Sleep( 50 ); // sleep 50 milliseconds
6443 if ( stream_.state == STREAM_CLOSED ) {
6444 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6445 error( RtAudioError::WARNING );
6449 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6450 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6452 // Check if we were draining the stream and signal is finished.
6453 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6455 stream_.state = STREAM_STOPPING;
6456 if ( handle->internalDrain == false )
6457 SetEvent( handle->condition );
6463 // Invoke user callback to get fresh output data UNLESS we are
6465 if ( handle->drainCounter == 0 ) {
6466 RtAudioCallback callback = (RtAudioCallback) info->callback;
6467 double streamTime = getStreamTime();
6468 RtAudioStreamStatus status = 0;
6469 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6470 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6471 handle->xrun[0] = false;
6473 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6474 status |= RTAUDIO_INPUT_OVERFLOW;
6475 handle->xrun[1] = false;
6477 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6478 stream_.bufferSize, streamTime, status, info->userData );
6479 if ( cbReturnValue == 2 ) {
6480 stream_.state = STREAM_STOPPING;
6481 handle->drainCounter = 2;
6485 else if ( cbReturnValue == 1 ) {
6486 handle->drainCounter = 1;
6487 handle->internalDrain = true;
6492 DWORD currentWritePointer, safeWritePointer;
6493 DWORD currentReadPointer, safeReadPointer;
6494 UINT nextWritePointer;
6496 LPVOID buffer1 = NULL;
6497 LPVOID buffer2 = NULL;
6498 DWORD bufferSize1 = 0;
6499 DWORD bufferSize2 = 0;
6504 MUTEX_LOCK( &stream_.mutex );
6505 if ( stream_.state == STREAM_STOPPED ) {
6506 MUTEX_UNLOCK( &stream_.mutex );
6510 if ( buffersRolling == false ) {
6511 if ( stream_.mode == DUPLEX ) {
6512 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6514 // It takes a while for the devices to get rolling. As a result,
6515 // there's no guarantee that the capture and write device pointers
6516 // will move in lockstep. Wait here for both devices to start
6517 // rolling, and then set our buffer pointers accordingly.
6518 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6519 // bytes later than the write buffer.
6521 // Stub: a serious risk of having a pre-emptive scheduling round
6522 // take place between the two GetCurrentPosition calls... but I'm
6523 // really not sure how to solve the problem. Temporarily boost to
6524 // Realtime priority, maybe; but I'm not sure what priority the
6525 // DirectSound service threads run at. We *should* be roughly
6526 // within a ms or so of correct.
6528 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6529 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6531 DWORD startSafeWritePointer, startSafeReadPointer;
6533 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6534 if ( FAILED( result ) ) {
6535 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6536 errorText_ = errorStream_.str();
6537 MUTEX_UNLOCK( &stream_.mutex );
6538 error( RtAudioError::SYSTEM_ERROR );
6541 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6542 if ( FAILED( result ) ) {
6543 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6544 errorText_ = errorStream_.str();
6545 MUTEX_UNLOCK( &stream_.mutex );
6546 error( RtAudioError::SYSTEM_ERROR );
6550 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6551 if ( FAILED( result ) ) {
6552 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6553 errorText_ = errorStream_.str();
6554 MUTEX_UNLOCK( &stream_.mutex );
6555 error( RtAudioError::SYSTEM_ERROR );
6558 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6559 if ( FAILED( result ) ) {
6560 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6561 errorText_ = errorStream_.str();
6562 MUTEX_UNLOCK( &stream_.mutex );
6563 error( RtAudioError::SYSTEM_ERROR );
6566 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6570 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6572 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6573 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6574 handle->bufferPointer[1] = safeReadPointer;
6576 else if ( stream_.mode == OUTPUT ) {
6578 // Set the proper nextWritePosition after initial startup.
6579 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6580 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6581 if ( FAILED( result ) ) {
6582 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6583 errorText_ = errorStream_.str();
6584 MUTEX_UNLOCK( &stream_.mutex );
6585 error( RtAudioError::SYSTEM_ERROR );
6588 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6589 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6592 buffersRolling = true;
6595 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6597 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6599 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6600 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6601 bufferBytes *= formatBytes( stream_.userFormat );
6602 memset( stream_.userBuffer[0], 0, bufferBytes );
6605 // Setup parameters and do buffer conversion if necessary.
6606 if ( stream_.doConvertBuffer[0] ) {
6607 buffer = stream_.deviceBuffer;
6608 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6609 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6610 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6613 buffer = stream_.userBuffer[0];
6614 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6615 bufferBytes *= formatBytes( stream_.userFormat );
6618 // No byte swapping necessary in DirectSound implementation.
6620 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6621 // unsigned. So, we need to convert our signed 8-bit data here to
6623 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6624 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6626 DWORD dsBufferSize = handle->dsBufferSize[0];
6627 nextWritePointer = handle->bufferPointer[0];
6629 DWORD endWrite, leadPointer;
6631 // Find out where the read and "safe write" pointers are.
6632 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6633 if ( FAILED( result ) ) {
6634 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6635 errorText_ = errorStream_.str();
6636 MUTEX_UNLOCK( &stream_.mutex );
6637 error( RtAudioError::SYSTEM_ERROR );
6641 // We will copy our output buffer into the region between
6642 // safeWritePointer and leadPointer. If leadPointer is not
6643 // beyond the next endWrite position, wait until it is.
6644 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6645 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6646 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6647 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6648 endWrite = nextWritePointer + bufferBytes;
6650 // Check whether the entire write region is behind the play pointer.
6651 if ( leadPointer >= endWrite ) break;
6653 // If we are here, then we must wait until the leadPointer advances
6654 // beyond the end of our next write region. We use the
6655 // Sleep() function to suspend operation until that happens.
6656 double millis = ( endWrite - leadPointer ) * 1000.0;
6657 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6658 if ( millis < 1.0 ) millis = 1.0;
6659 Sleep( (DWORD) millis );
6662 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6663 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6664 // We've strayed into the forbidden zone ... resync the read pointer.
6665 handle->xrun[0] = true;
6666 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6667 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6668 handle->bufferPointer[0] = nextWritePointer;
6669 endWrite = nextWritePointer + bufferBytes;
6672 // Lock free space in the buffer
6673 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6674 &bufferSize1, &buffer2, &bufferSize2, 0 );
6675 if ( FAILED( result ) ) {
6676 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6677 errorText_ = errorStream_.str();
6678 MUTEX_UNLOCK( &stream_.mutex );
6679 error( RtAudioError::SYSTEM_ERROR );
6683 // Copy our buffer into the DS buffer
6684 CopyMemory( buffer1, buffer, bufferSize1 );
6685 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6687 // Update our buffer offset and unlock sound buffer
6688 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6689 if ( FAILED( result ) ) {
6690 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6691 errorText_ = errorStream_.str();
6692 MUTEX_UNLOCK( &stream_.mutex );
6693 error( RtAudioError::SYSTEM_ERROR );
6696 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6697 handle->bufferPointer[0] = nextWritePointer;
6700 // Don't bother draining input
6701 if ( handle->drainCounter ) {
6702 handle->drainCounter++;
6706 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6708 // Setup parameters.
6709 if ( stream_.doConvertBuffer[1] ) {
6710 buffer = stream_.deviceBuffer;
6711 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6712 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6715 buffer = stream_.userBuffer[1];
6716 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6717 bufferBytes *= formatBytes( stream_.userFormat );
6720 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6721 long nextReadPointer = handle->bufferPointer[1];
6722 DWORD dsBufferSize = handle->dsBufferSize[1];
6724 // Find out where the write and "safe read" pointers are.
6725 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6726 if ( FAILED( result ) ) {
6727 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6728 errorText_ = errorStream_.str();
6729 MUTEX_UNLOCK( &stream_.mutex );
6730 error( RtAudioError::SYSTEM_ERROR );
6734 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6735 DWORD endRead = nextReadPointer + bufferBytes;
6737 // Handling depends on whether we are INPUT or DUPLEX.
6738 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6739 // then a wait here will drag the write pointers into the forbidden zone.
6741 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6742 // it's in a safe position. This causes dropouts, but it seems to be the only
6743 // practical way to sync up the read and write pointers reliably, given the
6744 // the very complex relationship between phase and increment of the read and write
6747 // In order to minimize audible dropouts in DUPLEX mode, we will
6748 // provide a pre-roll period of 0.5 seconds in which we return
6749 // zeros from the read buffer while the pointers sync up.
6751 if ( stream_.mode == DUPLEX ) {
6752 if ( safeReadPointer < endRead ) {
6753 if ( duplexPrerollBytes <= 0 ) {
6754 // Pre-roll time over. Be more agressive.
6755 int adjustment = endRead-safeReadPointer;
6757 handle->xrun[1] = true;
6759 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6760 // and perform fine adjustments later.
6761 // - small adjustments: back off by twice as much.
6762 if ( adjustment >= 2*bufferBytes )
6763 nextReadPointer = safeReadPointer-2*bufferBytes;
6765 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6767 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6771 // In pre=roll time. Just do it.
6772 nextReadPointer = safeReadPointer - bufferBytes;
6773 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6775 endRead = nextReadPointer + bufferBytes;
6778 else { // mode == INPUT
6779 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6780 // See comments for playback.
6781 double millis = (endRead - safeReadPointer) * 1000.0;
6782 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6783 if ( millis < 1.0 ) millis = 1.0;
6784 Sleep( (DWORD) millis );
6786 // Wake up and find out where we are now.
6787 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6788 if ( FAILED( result ) ) {
6789 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6790 errorText_ = errorStream_.str();
6791 MUTEX_UNLOCK( &stream_.mutex );
6792 error( RtAudioError::SYSTEM_ERROR );
6796 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6800 // Lock free space in the buffer
6801 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6802 &bufferSize1, &buffer2, &bufferSize2, 0 );
6803 if ( FAILED( result ) ) {
6804 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6805 errorText_ = errorStream_.str();
6806 MUTEX_UNLOCK( &stream_.mutex );
6807 error( RtAudioError::SYSTEM_ERROR );
6811 if ( duplexPrerollBytes <= 0 ) {
6812 // Copy our buffer into the DS buffer
6813 CopyMemory( buffer, buffer1, bufferSize1 );
6814 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6817 memset( buffer, 0, bufferSize1 );
6818 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6819 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6822 // Update our buffer offset and unlock sound buffer
6823 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6824 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6825 if ( FAILED( result ) ) {
6826 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6827 errorText_ = errorStream_.str();
6828 MUTEX_UNLOCK( &stream_.mutex );
6829 error( RtAudioError::SYSTEM_ERROR );
6832 handle->bufferPointer[1] = nextReadPointer;
6834 // No byte swapping necessary in DirectSound implementation.
6836 // If necessary, convert 8-bit data from unsigned to signed.
6837 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6838 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6840 // Do buffer conversion if necessary.
6841 if ( stream_.doConvertBuffer[1] )
6842 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6846 MUTEX_UNLOCK( &stream_.mutex );
6847 RtApi::tickStreamTime();
6850 // Definitions for utility functions and callbacks
6851 // specific to the DirectSound implementation.
6853 static unsigned __stdcall callbackHandler( void *ptr )
6855 CallbackInfo *info = (CallbackInfo *) ptr;
6856 RtApiDs *object = (RtApiDs *) info->object;
6857 bool* isRunning = &info->isRunning;
6859 while ( *isRunning == true ) {
6860 object->callbackEvent();
// DirectSound device-enumeration callback (signature matches the
// DSEnumCallback prototype).  For each enumerated device it checks that
// the device can actually be opened and reports usable capabilities,
// then records/updates its name and GUID in the shared device list.
// NOTE(review): this listing is truncated — some parameters (module,
// lpContext), else-branches, closing braces and the final `return TRUE;`
// are missing from the excerpt; the stale leading numbers are listing
// artifacts.
6867 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6868 LPCTSTR description,
// lpContext carries a DsProbeData: the device vector plus a flag saying
// whether we are enumerating capture (input) or playback devices.
6872 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6873 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
// Validate the device: it must open and report non-trivial capabilities.
6876 bool validDevice = false;
6877 if ( probeInfo.isInput == true ) {
6879 LPDIRECTSOUNDCAPTURE object;
6881 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6882 if ( hr != DS_OK ) return TRUE;
6884 caps.dwSize = sizeof(caps);
6885 hr = object->GetCaps( &caps );
6886 if ( hr == DS_OK ) {
// A capture device is valid only if it exposes channels and formats.
6887 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback path: open an IDirectSound object instead.
6894 LPDIRECTSOUND object;
6895 hr = DirectSoundCreate( lpguid, &object, NULL );
6896 if ( hr != DS_OK ) return TRUE;
6898 caps.dwSize = sizeof(caps);
6899 hr = object->GetCaps( &caps );
6900 if ( hr == DS_OK ) {
// A playback device is valid if it supports a primary mono/stereo buffer.
6901 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6907 // If good device, then save its name and guid.
6908 std::string name = convertCharPointerToStdString( description );
6909 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
6910 if ( lpguid == NULL )
6911 name = "Default Device";
6912 if ( validDevice ) {
// If a device with this name was seen before (e.g. for the other
// direction), update its entry; id[1]/validId[1] are the capture slot,
// id[0]/validId[0] the playback slot.
6913 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6914 if ( dsDevices[i].name == name ) {
6915 dsDevices[i].found = true;
6916 if ( probeInfo.isInput ) {
6917 dsDevices[i].id[1] = lpguid;
6918 dsDevices[i].validId[1] = true;
6921 dsDevices[i].id[0] = lpguid;
6922 dsDevices[i].validId[0] = true;
// Otherwise append a brand-new entry for this device.
6930 device.found = true;
6931 if ( probeInfo.isInput ) {
6932 device.id[1] = lpguid;
6933 device.validId[1] = true;
6936 device.id[0] = lpguid;
6937 device.validId[0] = true;
6939 dsDevices.push_back( device );
6945 static const char* getErrorString( int code )
6949 case DSERR_ALLOCATED:
6950 return "Already allocated";
6952 case DSERR_CONTROLUNAVAIL:
6953 return "Control unavailable";
6955 case DSERR_INVALIDPARAM:
6956 return "Invalid parameter";
6958 case DSERR_INVALIDCALL:
6959 return "Invalid call";
6962 return "Generic error";
6964 case DSERR_PRIOLEVELNEEDED:
6965 return "Priority level needed";
6967 case DSERR_OUTOFMEMORY:
6968 return "Out of memory";
6970 case DSERR_BADFORMAT:
6971 return "The sample rate or the channel format is not supported";
6973 case DSERR_UNSUPPORTED:
6974 return "Not supported";
6976 case DSERR_NODRIVER:
6979 case DSERR_ALREADYINITIALIZED:
6980 return "Already initialized";
6982 case DSERR_NOAGGREGATION:
6983 return "No aggregation";
6985 case DSERR_BUFFERLOST:
6986 return "Buffer lost";
6988 case DSERR_OTHERAPPHASPRIO:
6989 return "Another application already has priority";
6991 case DSERR_UNINITIALIZED:
6992 return "Uninitialized";
6995 return "DirectSound unknown error";
6998 //******************** End of __WINDOWS_DS__ *********************//
7002 #if defined(__LINUX_ALSA__)
7004 #include <alsa/asoundlib.h>
7007 // A structure to hold various information related to the ALSA API
7010 snd_pcm_t *handles[2];
7013 pthread_cond_t runnable_cv;
7017 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Forward declaration of the pthread-style (void* -> void*) routine that
// runs the ALSA callback loop; defined later in the file.
7020 static void *alsaCallbackHandler( void * ptr );
7022 RtApiAlsa :: RtApiAlsa()
7024 // Nothing to do here.
7027 RtApiAlsa :: ~RtApiAlsa()
7029 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count the PCM devices visible through ALSA: every card/subdevice pair
// reported by the control interface, plus the "default" device when it
// can be opened.  Probe failures are reported as warnings and skipped.
// NOTE(review): this listing is truncated — the opening/closing braces,
// local declarations (name buffer, snd_ctl_t handle), loop bodies and
// the final return are missing from the excerpt; stale leading numbers
// are listing artifacts.
7032 unsigned int RtApiAlsa :: getDeviceCount( void )
7034 unsigned nDevices = 0;
7035 int result, subdevice, card;
7039 // Count cards and devices
// Iterate over sound cards; snd_card_next(-1) yields the first card and
// returns -1 in `card` when none remain.
7041 snd_card_next( &card );
7042 while ( card >= 0 ) {
7043 sprintf( name, "hw:%d", card );
7044 result = snd_ctl_open( &handle, name, 0 );
// Control-open failure: warn and move on to the next card.
7046 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7047 errorText_ = errorStream_.str();
7048 error( RtAudioError::WARNING );
// Walk the PCM subdevices of this card; subdevice < 0 ends the walk.
7053 result = snd_ctl_pcm_next_device( handle, &subdevice );
7055 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7056 errorText_ = errorStream_.str();
7057 error( RtAudioError::WARNING );
7060 if ( subdevice < 0 )
7065 snd_ctl_close( handle );
7066 snd_card_next( &card );
// Finally, count the "default" device if it opens successfully.
7069 result = snd_ctl_open( &handle, "default", 0 );
7072 snd_ctl_close( handle );
// Probe a single ALSA device (by RtAudio index) and fill a DeviceInfo:
// output/input/duplex channel counts, supported sample rates, preferred
// rate, and native data formats.  Re-enumerates cards to map the index
// to an "hw:card,subdevice" (or "default") name, falls back to cached
// results (devices_) when the device is part of an open stream, and
// reports probe failures as warnings.
// NOTE(review): this listing is truncated — braces, else-branches, local
// declarations (name buffer, snd_ctl_t/snd_pcm_t handles, `value`,
// `cardname`), the `probeParameters:` label targeted by the gotos below,
// and the final `info.probed = true; return info;` are missing from the
// excerpt; stale leading numbers are listing artifacts.
7078 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
7080 RtAudio::DeviceInfo info;
7081 info.probed = false;
7083 unsigned nDevices = 0;
7084 int result, subdevice, card;
7088 // Count cards and devices
// Same enumeration as getDeviceCount(), but stop when the running count
// reaches the requested index and remember that device's hw name.
7091 snd_card_next( &card );
7092 while ( card >= 0 ) {
7093 sprintf( name, "hw:%d", card );
7094 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7096 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7097 errorText_ = errorStream_.str();
7098 error( RtAudioError::WARNING );
7103 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7105 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
7106 errorText_ = errorStream_.str();
7107 error( RtAudioError::WARNING );
7110 if ( subdevice < 0 ) break;
7111 if ( nDevices == device ) {
7112 sprintf( name, "hw:%d,%d", card, subdevice );
7118 snd_ctl_close( chandle );
7119 snd_card_next( &card );
// The "default" device is appended after all hw devices.
7122 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7123 if ( result == 0 ) {
7124 if ( nDevices == device ) {
7125 strcpy( name, "default" );
7131 if ( nDevices == 0 ) {
7132 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
7133 error( RtAudioError::INVALID_USE );
7137 if ( device >= nDevices ) {
7138 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
7139 error( RtAudioError::INVALID_USE );
7145 // If a stream is already open, we cannot probe the stream devices.
7146 // Thus, use the saved results.
7147 if ( stream_.state != STREAM_CLOSED &&
7148 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
7149 snd_ctl_close( chandle );
7150 if ( device >= devices_.size() ) {
7151 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
7152 error( RtAudioError::WARNING );
7155 return devices_[ device ];
7158 int openMode = SND_PCM_ASYNC;
7159 snd_pcm_stream_t stream;
7160 snd_pcm_info_t *pcminfo;
7161 snd_pcm_info_alloca( &pcminfo );
7163 snd_pcm_hw_params_t *params;
// NOTE(review): "¶ms" below is HTML-entity mojibake for "&params" —
// fix the encoding when restoring this file.
7164 snd_pcm_hw_params_alloca( ¶ms );
7166 // First try for playback unless default device (which has subdev -1)
7167 stream = SND_PCM_STREAM_PLAYBACK;
7168 snd_pcm_info_set_stream( pcminfo, stream );
7169 if ( subdevice != -1 ) {
7170 snd_pcm_info_set_device( pcminfo, subdevice );
7171 snd_pcm_info_set_subdevice( pcminfo, 0 );
7173 result = snd_ctl_pcm_info( chandle, pcminfo );
7175 // Device probably doesn't support playback.
7180 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
7182 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7183 errorText_ = errorStream_.str();
7184 error( RtAudioError::WARNING );
7188 // The device is open ... fill the parameter structure.
7189 result = snd_pcm_hw_params_any( phandle, params );
7191 snd_pcm_close( phandle );
7192 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7193 errorText_ = errorStream_.str();
7194 error( RtAudioError::WARNING );
7198 // Get output channel information.
7200 result = snd_pcm_hw_params_get_channels_max( params, &value );
7202 snd_pcm_close( phandle );
7203 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
7204 errorText_ = errorStream_.str();
7205 error( RtAudioError::WARNING );
7208 info.outputChannels = value;
7209 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7212 stream = SND_PCM_STREAM_CAPTURE;
7213 snd_pcm_info_set_stream( pcminfo, stream );
7215 // Now try for capture unless default device (with subdev = -1)
7216 if ( subdevice != -1 ) {
7217 result = snd_ctl_pcm_info( chandle, pcminfo );
7218 snd_ctl_close( chandle );
7220 // Device probably doesn't support capture.
// Playback-only device: skip ahead to rate/format probing (the
// probeParameters: label line is missing from this excerpt).
7221 if ( info.outputChannels == 0 ) return info;
7222 goto probeParameters;
7226 snd_ctl_close( chandle );
7228 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7230 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7231 errorText_ = errorStream_.str();
7232 error( RtAudioError::WARNING );
7233 if ( info.outputChannels == 0 ) return info;
7234 goto probeParameters;
7237 // The device is open ... fill the parameter structure.
7238 result = snd_pcm_hw_params_any( phandle, params );
7240 snd_pcm_close( phandle );
7241 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7242 errorText_ = errorStream_.str();
7243 error( RtAudioError::WARNING );
7244 if ( info.outputChannels == 0 ) return info;
7245 goto probeParameters;
7248 result = snd_pcm_hw_params_get_channels_max( params, &value );
7250 snd_pcm_close( phandle );
7251 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7252 errorText_ = errorStream_.str();
7253 error( RtAudioError::WARNING );
7254 if ( info.outputChannels == 0 ) return info;
7255 goto probeParameters;
7257 info.inputChannels = value;
7258 snd_pcm_close( phandle );
7260 // If device opens for both playback and capture, we determine the channels.
7261 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7262 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7264 // ALSA doesn't provide default devices so we'll use the first available one.
7265 if ( device == 0 && info.outputChannels > 0 )
7266 info.isDefaultOutput = true;
7267 if ( device == 0 && info.inputChannels > 0 )
7268 info.isDefaultInput = true;
7271 // At this point, we just need to figure out the supported data
7272 // formats and sample rates. We'll proceed by opening the device in
7273 // the direction with the maximum number of channels, or playback if
7274 // they are equal. This might limit our sample rate options, but so
7277 if ( info.outputChannels >= info.inputChannels )
7278 stream = SND_PCM_STREAM_PLAYBACK;
7280 stream = SND_PCM_STREAM_CAPTURE;
7281 snd_pcm_info_set_stream( pcminfo, stream );
7283 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7285 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7286 errorText_ = errorStream_.str();
7287 error( RtAudioError::WARNING );
7291 // The device is open ... fill the parameter structure.
7292 result = snd_pcm_hw_params_any( phandle, params );
7294 snd_pcm_close( phandle );
7295 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7296 errorText_ = errorStream_.str();
7297 error( RtAudioError::WARNING );
7301 // Test our discrete set of sample rate values.
// Preferred rate: the highest supported rate not exceeding 48 kHz.
7302 info.sampleRates.clear();
7303 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7304 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7305 info.sampleRates.push_back( SAMPLE_RATES[i] );
7307 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7308 info.preferredSampleRate = SAMPLE_RATES[i];
7311 if ( info.sampleRates.size() == 0 ) {
7312 snd_pcm_close( phandle );
7313 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7314 errorText_ = errorStream_.str();
7315 error( RtAudioError::WARNING );
7319 // Probe the supported data formats ... we don't care about endian-ness just yet
7320 snd_pcm_format_t format;
7321 info.nativeFormats = 0;
7322 format = SND_PCM_FORMAT_S8;
7323 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7324 info.nativeFormats |= RTAUDIO_SINT8;
7325 format = SND_PCM_FORMAT_S16;
7326 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7327 info.nativeFormats |= RTAUDIO_SINT16;
7328 format = SND_PCM_FORMAT_S24;
7329 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7330 info.nativeFormats |= RTAUDIO_SINT24;
7331 format = SND_PCM_FORMAT_S32;
7332 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7333 info.nativeFormats |= RTAUDIO_SINT32;
7334 format = SND_PCM_FORMAT_FLOAT;
7335 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7336 info.nativeFormats |= RTAUDIO_FLOAT32;
7337 format = SND_PCM_FORMAT_FLOAT64;
7338 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7339 info.nativeFormats |= RTAUDIO_FLOAT64;
7341 // Check that we have at least one supported format
7342 if ( info.nativeFormats == 0 ) {
7343 snd_pcm_close( phandle );
7344 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7345 errorText_ = errorStream_.str();
7346 error( RtAudioError::WARNING );
7350 // Get the device name
// Translate "hw:card,subdev" into a human-readable "hw:cardname,subdev".
7352 result = snd_card_get_name( card, &cardname );
7353 if ( result >= 0 ) {
7354 sprintf( name, "hw:%s,%d", cardname, subdevice );
7359 // That's all ... close the device and return
7360 snd_pcm_close( phandle );
7365 void RtApiAlsa :: saveDeviceInfo( void )
7369 unsigned int nDevices = getDeviceCount();
7370 devices_.resize( nDevices );
7371 for ( unsigned int i=0; i<nDevices; i++ )
7372 devices_[i] = getDeviceInfo( i );
7375 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7376 unsigned int firstChannel, unsigned int sampleRate,
7377 RtAudioFormat format, unsigned int *bufferSize,
7378 RtAudio::StreamOptions *options )
7381 #if defined(__RTAUDIO_DEBUG__)
7383 snd_output_stdio_attach(&out, stderr, 0);
7386 // I'm not using the "plug" interface ... too much inconsistent behavior.
7388 unsigned nDevices = 0;
7389 int result, subdevice, card;
7393 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7394 snprintf(name, sizeof(name), "%s", "default");
7396 // Count cards and devices
7398 snd_card_next( &card );
7399 while ( card >= 0 ) {
7400 sprintf( name, "hw:%d", card );
7401 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7403 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7404 errorText_ = errorStream_.str();
7409 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7410 if ( result < 0 ) break;
7411 if ( subdevice < 0 ) break;
7412 if ( nDevices == device ) {
7413 sprintf( name, "hw:%d,%d", card, subdevice );
7414 snd_ctl_close( chandle );
7419 snd_ctl_close( chandle );
7420 snd_card_next( &card );
7423 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7424 if ( result == 0 ) {
7425 if ( nDevices == device ) {
7426 strcpy( name, "default" );
7432 if ( nDevices == 0 ) {
7433 // This should not happen because a check is made before this function is called.
7434 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7438 if ( device >= nDevices ) {
7439 // This should not happen because a check is made before this function is called.
7440 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7447 // The getDeviceInfo() function will not work for a device that is
7448 // already open. Thus, we'll probe the system before opening a
7449 // stream and save the results for use by getDeviceInfo().
7450 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7451 this->saveDeviceInfo();
7453 snd_pcm_stream_t stream;
7454 if ( mode == OUTPUT )
7455 stream = SND_PCM_STREAM_PLAYBACK;
7457 stream = SND_PCM_STREAM_CAPTURE;
7460 int openMode = SND_PCM_ASYNC;
7461 result = snd_pcm_open( &phandle, name, stream, openMode );
7463 if ( mode == OUTPUT )
7464 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7466 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7467 errorText_ = errorStream_.str();
7471 // Fill the parameter structure.
7472 snd_pcm_hw_params_t *hw_params;
7473 snd_pcm_hw_params_alloca( &hw_params );
7474 result = snd_pcm_hw_params_any( phandle, hw_params );
7476 snd_pcm_close( phandle );
7477 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7478 errorText_ = errorStream_.str();
7482 #if defined(__RTAUDIO_DEBUG__)
7483 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7484 snd_pcm_hw_params_dump( hw_params, out );
7487 // Set access ... check user preference.
7488 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7489 stream_.userInterleaved = false;
7490 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7492 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7493 stream_.deviceInterleaved[mode] = true;
7496 stream_.deviceInterleaved[mode] = false;
7499 stream_.userInterleaved = true;
7500 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7502 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7503 stream_.deviceInterleaved[mode] = false;
7506 stream_.deviceInterleaved[mode] = true;
7510 snd_pcm_close( phandle );
7511 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7512 errorText_ = errorStream_.str();
7516 // Determine how to set the device format.
7517 stream_.userFormat = format;
7518 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7520 if ( format == RTAUDIO_SINT8 )
7521 deviceFormat = SND_PCM_FORMAT_S8;
7522 else if ( format == RTAUDIO_SINT16 )
7523 deviceFormat = SND_PCM_FORMAT_S16;
7524 else if ( format == RTAUDIO_SINT24 )
7525 deviceFormat = SND_PCM_FORMAT_S24;
7526 else if ( format == RTAUDIO_SINT32 )
7527 deviceFormat = SND_PCM_FORMAT_S32;
7528 else if ( format == RTAUDIO_FLOAT32 )
7529 deviceFormat = SND_PCM_FORMAT_FLOAT;
7530 else if ( format == RTAUDIO_FLOAT64 )
7531 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7533 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7534 stream_.deviceFormat[mode] = format;
7538 // The user requested format is not natively supported by the device.
7539 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7540 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7541 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7545 deviceFormat = SND_PCM_FORMAT_FLOAT;
7546 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7547 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7551 deviceFormat = SND_PCM_FORMAT_S32;
7552 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7553 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7557 deviceFormat = SND_PCM_FORMAT_S24;
7558 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7559 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7563 deviceFormat = SND_PCM_FORMAT_S16;
7564 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7565 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7569 deviceFormat = SND_PCM_FORMAT_S8;
7570 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7571 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7575 // If we get here, no supported format was found.
7576 snd_pcm_close( phandle );
7577 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7578 errorText_ = errorStream_.str();
7582 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7584 snd_pcm_close( phandle );
7585 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7586 errorText_ = errorStream_.str();
7590 // Determine whether byte-swaping is necessary.
7591 stream_.doByteSwap[mode] = false;
7592 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7593 result = snd_pcm_format_cpu_endian( deviceFormat );
7595 stream_.doByteSwap[mode] = true;
7596 else if (result < 0) {
7597 snd_pcm_close( phandle );
7598 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7599 errorText_ = errorStream_.str();
7604 // Set the sample rate.
7605 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7607 snd_pcm_close( phandle );
7608 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7609 errorText_ = errorStream_.str();
7613 // Determine the number of channels for this device. We support a possible
7614 // minimum device channel number > than the value requested by the user.
7615 stream_.nUserChannels[mode] = channels;
7617 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7618 unsigned int deviceChannels = value;
7619 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7620 snd_pcm_close( phandle );
7621 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7622 errorText_ = errorStream_.str();
7626 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7628 snd_pcm_close( phandle );
7629 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7630 errorText_ = errorStream_.str();
7633 deviceChannels = value;
7634 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7635 stream_.nDeviceChannels[mode] = deviceChannels;
7637 // Set the device channels.
7638 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7640 snd_pcm_close( phandle );
7641 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7642 errorText_ = errorStream_.str();
7646 // Set the buffer (or period) size.
7648 snd_pcm_uframes_t periodSize = *bufferSize;
7649 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7651 snd_pcm_close( phandle );
7652 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7653 errorText_ = errorStream_.str();
7656 *bufferSize = periodSize;
7658 // Set the buffer number, which in ALSA is referred to as the "period".
7659 unsigned int periods = 0;
7660 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7661 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7662 if ( periods < 2 ) periods = 4; // a fairly safe default value
7663 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7665 snd_pcm_close( phandle );
7666 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7667 errorText_ = errorStream_.str();
7671 // If attempting to setup a duplex stream, the bufferSize parameter
7672 // MUST be the same in both directions!
7673 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7674 snd_pcm_close( phandle );
7675 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7676 errorText_ = errorStream_.str();
7680 stream_.bufferSize = *bufferSize;
7682 // Install the hardware configuration
7683 result = snd_pcm_hw_params( phandle, hw_params );
7685 snd_pcm_close( phandle );
7686 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7687 errorText_ = errorStream_.str();
7691 #if defined(__RTAUDIO_DEBUG__)
7692 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7693 snd_pcm_hw_params_dump( hw_params, out );
7696 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7697 snd_pcm_sw_params_t *sw_params = NULL;
7698 snd_pcm_sw_params_alloca( &sw_params );
7699 snd_pcm_sw_params_current( phandle, sw_params );
7700 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7701 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7702 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7704 // The following two settings were suggested by Theo Veenker
7705 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7706 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7708 // here are two options for a fix
7709 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7710 snd_pcm_uframes_t val;
7711 snd_pcm_sw_params_get_boundary( sw_params, &val );
7712 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7714 result = snd_pcm_sw_params( phandle, sw_params );
7716 snd_pcm_close( phandle );
7717 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7718 errorText_ = errorStream_.str();
7722 #if defined(__RTAUDIO_DEBUG__)
7723 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7724 snd_pcm_sw_params_dump( sw_params, out );
7727 // Set flags for buffer conversion
7728 stream_.doConvertBuffer[mode] = false;
7729 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7730 stream_.doConvertBuffer[mode] = true;
7731 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7732 stream_.doConvertBuffer[mode] = true;
7733 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7734 stream_.nUserChannels[mode] > 1 )
7735 stream_.doConvertBuffer[mode] = true;
7737 // Allocate the ApiHandle if necessary and then save.
7738 AlsaHandle *apiInfo = 0;
7739 if ( stream_.apiHandle == 0 ) {
7741 apiInfo = (AlsaHandle *) new AlsaHandle;
7743 catch ( std::bad_alloc& ) {
7744 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7748 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7749 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7753 stream_.apiHandle = (void *) apiInfo;
7754 apiInfo->handles[0] = 0;
7755 apiInfo->handles[1] = 0;
7758 apiInfo = (AlsaHandle *) stream_.apiHandle;
7760 apiInfo->handles[mode] = phandle;
7763 // Allocate necessary internal buffers.
7764 unsigned long bufferBytes;
7765 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7766 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7767 if ( stream_.userBuffer[mode] == NULL ) {
7768 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7772 if ( stream_.doConvertBuffer[mode] ) {
7774 bool makeBuffer = true;
7775 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7776 if ( mode == INPUT ) {
7777 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7778 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7779 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7784 bufferBytes *= *bufferSize;
7785 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7786 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7787 if ( stream_.deviceBuffer == NULL ) {
7788 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7794 stream_.sampleRate = sampleRate;
7795 stream_.nBuffers = periods;
7796 stream_.device[mode] = device;
7797 stream_.state = STREAM_STOPPED;
7799 // Setup the buffer conversion information structure.
7800 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7802 // Setup thread if necessary.
7803 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7804 // We had already set up an output stream.
7805 stream_.mode = DUPLEX;
7806 // Link the streams if possible.
7807 apiInfo->synchronized = false;
7808 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7809 apiInfo->synchronized = true;
7811 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7812 error( RtAudioError::WARNING );
7816 stream_.mode = mode;
7818 // Setup callback thread.
7819 stream_.callbackInfo.object = (void *) this;
7821 // Set the thread attributes for joinable and realtime scheduling
7822 // priority (optional). The higher priority will only take affect
7823 // if the program is run as root or suid. Note, under Linux
7824 // processes with CAP_SYS_NICE privilege, a user can change
7825 // scheduling policy and priority (thus need not be root). See
7826 // POSIX "capabilities".
7827 pthread_attr_t attr;
7828 pthread_attr_init( &attr );
7829 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7830 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7831 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7832 stream_.callbackInfo.doRealtime = true;
7833 struct sched_param param;
7834 int priority = options->priority;
7835 int min = sched_get_priority_min( SCHED_RR );
7836 int max = sched_get_priority_max( SCHED_RR );
7837 if ( priority < min ) priority = min;
7838 else if ( priority > max ) priority = max;
7839 param.sched_priority = priority;
7841 // Set the policy BEFORE the priority. Otherwise it fails.
7842 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7843 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7844 // This is definitely required. Otherwise it fails.
7845 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7846 pthread_attr_setschedparam(&attr, ¶m);
7849 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7851 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7854 stream_.callbackInfo.isRunning = true;
7855 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7856 pthread_attr_destroy( &attr );
7858 // Failed. Try instead with default attributes.
7859 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7861 stream_.callbackInfo.isRunning = false;
7862 errorText_ = "RtApiAlsa::error creating callback thread!";
7872 pthread_cond_destroy( &apiInfo->runnable_cv );
7873 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7874 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7876 stream_.apiHandle = 0;
7879 if ( phandle) snd_pcm_close( phandle );
7881 for ( int i=0; i<2; i++ ) {
7882 if ( stream_.userBuffer[i] ) {
7883 free( stream_.userBuffer[i] );
7884 stream_.userBuffer[i] = 0;
7888 if ( stream_.deviceBuffer ) {
7889 free( stream_.deviceBuffer );
7890 stream_.deviceBuffer = 0;
7893 stream_.state = STREAM_CLOSED;
// Close an open ALSA stream: stop the callback thread (waking it first if the
// stream is stopped and the thread is parked on the condition variable), drop
// any in-flight audio, close both PCM handles, and release all buffers/state.
// NOTE(review): several brace/return lines are elided in this extraction;
// control flow between the visible statements should be confirmed upstream.
7897 void RtApiAlsa :: closeStream()
7899 if ( stream_.state == STREAM_CLOSED ) {
7900 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7901 error( RtAudioError::WARNING );
// Signal isRunning = false so the callback loop in alsaCallbackHandler exits,
// then wake the thread in case it is blocked waiting on runnable_cv.
7905 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7906 stream_.callbackInfo.isRunning = false;
7907 MUTEX_LOCK( &stream_.mutex );
7908 if ( stream_.state == STREAM_STOPPED ) {
7909 apiInfo->runnable = true;
7910 pthread_cond_signal( &apiInfo->runnable_cv );
7912 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing anything down.
7913 pthread_join( stream_.callbackInfo.thread, NULL );
// If still running, abort playback/capture immediately (snd_pcm_drop discards
// pending frames; handles[0] = output, handles[1] = input).
7915 if ( stream_.state == STREAM_RUNNING ) {
7916 stream_.state = STREAM_STOPPED;
7917 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7918 snd_pcm_drop( apiInfo->handles[0] );
7919 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7920 snd_pcm_drop( apiInfo->handles[1] );
// Destroy the condition variable and close whichever PCM handles were opened.
7924 pthread_cond_destroy( &apiInfo->runnable_cv );
7925 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7926 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7928 stream_.apiHandle = 0;
// Free the user-side buffers for both directions.
7931 for ( int i=0; i<2; i++ ) {
7932 if ( stream_.userBuffer[i] ) {
7933 free( stream_.userBuffer[i] );
7934 stream_.userBuffer[i] = 0;
// Free the (shared) device-format conversion buffer, if allocated.
7938 if ( stream_.deviceBuffer ) {
7939 free( stream_.deviceBuffer );
7940 stream_.deviceBuffer = 0;
// Reset the stream bookkeeping to its closed state.
7943 stream_.mode = UNINITIALIZED;
7944 stream_.state = STREAM_CLOSED;
// Start a stopped ALSA stream: prepare the PCM device(s) if needed, mark the
// stream RUNNING, and wake the parked callback thread via runnable_cv.
7947 void RtApiAlsa :: startStream()
7949 // This method calls snd_pcm_prepare if the device isn't already in that state.
7952 if ( stream_.state == STREAM_RUNNING ) {
7953 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7954 error( RtAudioError::WARNING );
7958 MUTEX_LOCK( &stream_.mutex );
7961 snd_pcm_state_t state;
7962 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7963 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
// Output side (handle[0]): ensure the device is in the PREPARED state.
7964 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7965 state = snd_pcm_state( handle[0] );
7966 if ( state != SND_PCM_STATE_PREPARED ) {
7967 result = snd_pcm_prepare( handle[0] );
7969 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7970 errorText_ = errorStream_.str();
// Input side (handle[1]): only touched when the handles are not linked
// (synchronized), since linked handles start together with the output.
7976 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7977 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7978 state = snd_pcm_state( handle[1] );
7979 if ( state != SND_PCM_STATE_PREPARED ) {
7980 result = snd_pcm_prepare( handle[1] );
7982 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7983 errorText_ = errorStream_.str();
7989 stream_.state = STREAM_RUNNING;
// Release the callback thread, which waits on runnable_cv while stopped.
7992 apiInfo->runnable = true;
7993 pthread_cond_signal( &apiInfo->runnable_cv );
7994 MUTEX_UNLOCK( &stream_.mutex );
// Any negative ALSA result falls through to a SYSTEM_ERROR report.
7996 if ( result >= 0 ) return;
7997 error( RtAudioError::SYSTEM_ERROR );
// Stop a running ALSA stream gracefully. Output is drained (snd_pcm_drain)
// unless the handles are linked, in which case drop is used; unlinked input is
// dropped (capture data is discarded, not drained).
8000 void RtApiAlsa :: stopStream()
8003 if ( stream_.state == STREAM_STOPPED ) {
8004 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
8005 error( RtAudioError::WARNING );
// State is flipped before taking the mutex so the callback thread sees
// STREAM_STOPPED and parks itself on the next iteration.
8009 stream_.state = STREAM_STOPPED;
8010 MUTEX_LOCK( &stream_.mutex );
8013 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8014 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8015 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8016 if ( apiInfo->synchronized )
8017 result = snd_pcm_drop( handle[0] );
8019 result = snd_pcm_drain( handle[0] );
8021 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
8022 errorText_ = errorStream_.str();
8027 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8028 result = snd_pcm_drop( handle[1] );
8030 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
8031 errorText_ = errorStream_.str();
8037 apiInfo->runnable = false; // fixes high CPU usage when stopped
8038 MUTEX_UNLOCK( &stream_.mutex );
8040 if ( result >= 0 ) return;
8041 error( RtAudioError::SYSTEM_ERROR );
// Abort a running ALSA stream immediately: identical to stopStream() except
// the output side always uses snd_pcm_drop (discarding queued frames) rather
// than draining them.
8044 void RtApiAlsa :: abortStream()
8047 if ( stream_.state == STREAM_STOPPED ) {
8048 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
8049 error( RtAudioError::WARNING );
8053 stream_.state = STREAM_STOPPED;
8054 MUTEX_LOCK( &stream_.mutex );
8057 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
8058 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
8059 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8060 result = snd_pcm_drop( handle[0] );
8062 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
8063 errorText_ = errorStream_.str();
// Unlinked input is dropped separately; linked handles stop with the output.
8068 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
8069 result = snd_pcm_drop( handle[1] );
8071 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
8072 errorText_ = errorStream_.str();
8078 apiInfo->runnable = false; // fixes high CPU usage when stopped
8079 MUTEX_UNLOCK( &stream_.mutex );
8081 if ( result >= 0 ) return;
8082 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: park while stopped, invoke the
// user callback, then perform the input read and/or output write (with byte
// swap and format conversion as configured), handling xrun (-EPIPE) recovery.
// Statement order here is load-bearing (lock/unlock pairing, xrun recovery),
// so only comments are added.
8085 void RtApiAlsa :: callbackEvent()
8087 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// While the stream is stopped, block on runnable_cv until startStream() or
// closeStream() wakes us; bail out if we were not restarted.
8088 if ( stream_.state == STREAM_STOPPED ) {
8089 MUTEX_LOCK( &stream_.mutex );
8090 while ( !apiInfo->runnable )
8091 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
8093 if ( stream_.state != STREAM_RUNNING ) {
8094 MUTEX_UNLOCK( &stream_.mutex );
8097 MUTEX_UNLOCK( &stream_.mutex );
8100 if ( stream_.state == STREAM_CLOSED ) {
8101 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
8102 error( RtAudioError::WARNING );
// Invoke the user callback, reporting any under/overflow detected on the
// previous pass via the status flags (xrun[0] = output, xrun[1] = input).
8106 int doStopStream = 0;
8107 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8108 double streamTime = getStreamTime();
8109 RtAudioStreamStatus status = 0;
8110 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
8111 status |= RTAUDIO_OUTPUT_UNDERFLOW;
8112 apiInfo->xrun[0] = false;
8114 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
8115 status |= RTAUDIO_INPUT_OVERFLOW;
8116 apiInfo->xrun[1] = false;
8118 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
8119 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Return value 2 means abort immediately (handled in the elided branch).
8121 if ( doStopStream == 2 ) {
8126 MUTEX_LOCK( &stream_.mutex );
8128 // The state might change while waiting on a mutex.
8129 if ( stream_.state == STREAM_STOPPED ) goto unlock;
8135 snd_pcm_sframes_t frames;
8136 RtAudioFormat format;
8137 handle = (snd_pcm_t **) apiInfo->handles;
// ---- INPUT side: read from handle[1] into either the conversion buffer or
// the user buffer, depending on whether format conversion is required.
8139 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
8141 // Setup parameters.
8142 if ( stream_.doConvertBuffer[1] ) {
8143 buffer = stream_.deviceBuffer;
8144 channels = stream_.nDeviceChannels[1];
8145 format = stream_.deviceFormat[1];
8148 buffer = stream_.userBuffer[1];
8149 channels = stream_.nUserChannels[1];
8150 format = stream_.userFormat;
8153 // Read samples from device in interleaved/non-interleaved format.
8154 if ( stream_.deviceInterleaved[1] )
8155 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
8157 void *bufs[channels];
8158 size_t offset = stream_.bufferSize * formatBytes( format );
8159 for ( int i=0; i<channels; i++ )
8160 bufs[i] = (void *) (buffer + (i * offset));
8161 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
// A short read signals either an overrun (-EPIPE -> re-prepare) or an error.
8164 if ( result < (int) stream_.bufferSize ) {
8165 // Either an error or overrun occured.
8166 if ( result == -EPIPE ) {
8167 snd_pcm_state_t state = snd_pcm_state( handle[1] );
8168 if ( state == SND_PCM_STATE_XRUN ) {
8169 apiInfo->xrun[1] = true;
8170 result = snd_pcm_prepare( handle[1] );
8172 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
8173 errorText_ = errorStream_.str();
8177 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8178 errorText_ = errorStream_.str();
8182 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
8183 errorText_ = errorStream_.str();
8185 error( RtAudioError::WARNING );
8189 // Do byte swapping if necessary.
8190 if ( stream_.doByteSwap[1] )
8191 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
8193 // Do buffer conversion if necessary.
8194 if ( stream_.doConvertBuffer[1] )
8195 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
8197 // Check stream latency
8198 result = snd_pcm_delay( handle[1], &frames );
8199 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- OUTPUT side: convert/byte-swap the user buffer as needed, then write
// to handle[0]; -EPIPE indicates an underrun and triggers re-prepare.
8204 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8206 // Setup parameters and do buffer conversion if necessary.
8207 if ( stream_.doConvertBuffer[0] ) {
8208 buffer = stream_.deviceBuffer;
8209 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8210 channels = stream_.nDeviceChannels[0];
8211 format = stream_.deviceFormat[0];
8214 buffer = stream_.userBuffer[0];
8215 channels = stream_.nUserChannels[0];
8216 format = stream_.userFormat;
8219 // Do byte swapping if necessary.
8220 if ( stream_.doByteSwap[0] )
8221 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8223 // Write samples to device in interleaved/non-interleaved format.
8224 if ( stream_.deviceInterleaved[0] )
8225 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8227 void *bufs[channels];
8228 size_t offset = stream_.bufferSize * formatBytes( format );
8229 for ( int i=0; i<channels; i++ )
8230 bufs[i] = (void *) (buffer + (i * offset));
8231 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8234 if ( result < (int) stream_.bufferSize ) {
8235 // Either an error or underrun occured.
8236 if ( result == -EPIPE ) {
8237 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8238 if ( state == SND_PCM_STATE_XRUN ) {
8239 apiInfo->xrun[0] = true;
8240 result = snd_pcm_prepare( handle[0] );
8242 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8243 errorText_ = errorStream_.str();
8246 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8249 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8250 errorText_ = errorStream_.str();
8254 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8255 errorText_ = errorStream_.str();
8257 error( RtAudioError::WARNING );
8261 // Check stream latency
8262 result = snd_pcm_delay( handle[0], &frames );
8263 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
// Advance the stream clock and honor a callback request to stop (return 1).
8267 MUTEX_UNLOCK( &stream_.mutex );
8269 RtApi::tickStreamTime();
8270 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread. Loops calling
// callbackEvent() until closeStream() clears CallbackInfo::isRunning, with a
// cancellation point each iteration. Logs whether SCHED_RR realtime
// scheduling actually took effect when it was requested.
8273 static void *alsaCallbackHandler( void *ptr )
8275 CallbackInfo *info = (CallbackInfo *) ptr;
8276 RtApiAlsa *object = (RtApiAlsa *) info->object;
8277 bool *isRunning = &info->isRunning;
8279 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8280 if ( info->doRealtime ) {
8281 std::cerr << "RtAudio alsa: " <<
8282 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8283 "running realtime scheduling" << std::endl;
8287 while ( *isRunning == true ) {
8288 pthread_testcancel();
8289 object->callbackEvent();
8292 pthread_exit( NULL );
8295 //******************** End of __LINUX_ALSA__ *********************//
8298 #if defined(__LINUX_PULSE__)
8300 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8301 // and Tristan Matthews.
8303 #include <pulse/error.h>
8304 #include <pulse/simple.h>
// Sample rates the Pulse backend advertises; the list is zero-terminated so
// callers can iterate with `for ( const unsigned int *sr = ...; *sr; ++sr )`.
8307 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8308 44100, 48000, 96000, 0};
// Mapping between RtAudio sample formats and their PulseAudio equivalents.
8310 struct rtaudio_pa_format_mapping_t {
8311 RtAudioFormat rtaudio_format;
8312 pa_sample_format_t pa_format;
// Natively supported formats; terminated by the {0, PA_SAMPLE_INVALID} entry.
8315 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8316 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8317 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8318 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8319 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the Pulse backend: the simple-API playback (s_play)
// and record (s_rec) connections, the callback thread, and the runnable
// flag/condvar used to park the thread while the stream is stopped.
// NOTE(review): the member declarations for s_play/s_rec/thread/runnable are
// elided in this extraction; only the constructor's init list shows them.
8321 struct PulseAudioHandle {
8325 pthread_cond_t runnable_cv;
8327 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: ensure the stream is torn down if the user never closed it
// (the call on the elided next line presumably invokes closeStream()).
8330 RtApiPulse::~RtApiPulse()
8332 if ( stream_.state != STREAM_CLOSED )
8336 unsigned int RtApiPulse::getDeviceCount( void )
// Report the single virtual "PulseAudio" device. The device index is ignored
// because the simple API exposes only the server's default sink/source; the
// info advertises 2-in/2-out, all SUPPORTED_SAMPLERATES, and the three
// natively mapped sample formats.
8341 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8343 RtAudio::DeviceInfo info;
8345 info.name = "PulseAudio";
8346 info.outputChannels = 2;
8347 info.inputChannels = 2;
8348 info.duplexChannels = 2;
8349 info.isDefaultOutput = true;
8350 info.isDefaultInput = true;
8352 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8353 info.sampleRates.push_back( *sr );
8355 info.preferredSampleRate = 48000;
8356 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the Pulse callback thread: mirrors
// alsaCallbackHandler — loop on callbackEvent() until isRunning is cleared,
// logging whether requested SCHED_RR scheduling actually took effect.
8361 static void *pulseaudio_callback( void * user )
8363 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8364 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
// volatile: isRunning is cleared from another thread (closeStream).
8365 volatile bool *isRunning = &cbi->isRunning;
8367 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8368 if (cbi->doRealtime) {
8369 std::cerr << "RtAudio pulse: " <<
8370 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8371 "running realtime scheduling" << std::endl;
8375 while ( *isRunning ) {
8376 pthread_testcancel();
8377 context->callbackEvent();
8380 pthread_exit( NULL );
// Close an open Pulse stream: stop and join the callback thread (waking it if
// parked), flush and free the simple-API connections, destroy the condvar,
// and release the user buffers.
8383 void RtApiPulse::closeStream( void )
8385 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8387 stream_.callbackInfo.isRunning = false;
// Wake the callback thread if it is blocked on runnable_cv while stopped.
8389 MUTEX_LOCK( &stream_.mutex );
8390 if ( stream_.state == STREAM_STOPPED ) {
8391 pah->runnable = true;
8392 pthread_cond_signal( &pah->runnable_cv );
8394 MUTEX_UNLOCK( &stream_.mutex );
8396 pthread_join( pah->thread, 0 );
// Discard any queued playback audio before freeing the connection.
8397 if ( pah->s_play ) {
8398 pa_simple_flush( pah->s_play, NULL );
8399 pa_simple_free( pah->s_play );
8402 pa_simple_free( pah->s_rec );
8404 pthread_cond_destroy( &pah->runnable_cv );
8406 stream_.apiHandle = 0;
// Free both user buffers (output = [0], input = [1]).
8409 if ( stream_.userBuffer[0] ) {
8410 free( stream_.userBuffer[0] );
8411 stream_.userBuffer[0] = 0;
8413 if ( stream_.userBuffer[1] ) {
8414 free( stream_.userBuffer[1] );
8415 stream_.userBuffer[1] = 0;
8418 stream_.state = STREAM_CLOSED;
8419 stream_.mode = UNINITIALIZED;
// One iteration of the Pulse callback loop: park while stopped, invoke the
// user callback, then push the output buffer with pa_simple_write and/or pull
// the input buffer with pa_simple_read (blocking calls that pace the loop),
// applying format conversion when configured.
8422 void RtApiPulse::callbackEvent( void )
8424 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park on runnable_cv while the stream is stopped; exit the iteration if we
// were woken for something other than a restart.
8426 if ( stream_.state == STREAM_STOPPED ) {
8427 MUTEX_LOCK( &stream_.mutex );
8428 while ( !pah->runnable )
8429 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8431 if ( stream_.state != STREAM_RUNNING ) {
8432 MUTEX_UNLOCK( &stream_.mutex );
8435 MUTEX_UNLOCK( &stream_.mutex );
8438 if ( stream_.state == STREAM_CLOSED ) {
8439 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8440 "this shouldn't happen!";
8441 error( RtAudioError::WARNING );
// Run the user callback; status flags are never set for Pulse (no xrun
// detection through the simple API).
8445 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8446 double streamTime = getStreamTime();
8447 RtAudioStreamStatus status = 0;
8448 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8449 stream_.bufferSize, streamTime, status,
8450 stream_.callbackInfo.userData );
// Return value 2 requests an immediate abort (handled in the elided branch).
8452 if ( doStopStream == 2 ) {
8456 MUTEX_LOCK( &stream_.mutex );
// Pick the buffer Pulse actually talks to: the conversion buffer when format
// conversion is enabled, otherwise the user buffer directly.
8458 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8459 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8461 if ( stream_.state != STREAM_RUNNING )
// ---- OUTPUT: convert (user -> device format) if needed, then write.
8466 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8467 if ( stream_.doConvertBuffer[OUTPUT] ) {
8468 convertBuffer( stream_.deviceBuffer,
8469 stream_.userBuffer[OUTPUT],
8470 stream_.convertInfo[OUTPUT] );
8471 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8472 formatBytes( stream_.deviceFormat[OUTPUT] );
8474 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8475 formatBytes( stream_.userFormat );
8477 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8478 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8479 pa_strerror( pa_error ) << ".";
8480 errorText_ = errorStream_.str();
8481 error( RtAudioError::WARNING );
// ---- INPUT: read, then convert (device -> user format) if needed.
8485 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8486 if ( stream_.doConvertBuffer[INPUT] )
8487 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8488 formatBytes( stream_.deviceFormat[INPUT] );
8490 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8491 formatBytes( stream_.userFormat );
8493 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8494 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8495 pa_strerror( pa_error ) << ".";
8496 errorText_ = errorStream_.str();
8497 error( RtAudioError::WARNING );
8499 if ( stream_.doConvertBuffer[INPUT] ) {
8500 convertBuffer( stream_.userBuffer[INPUT],
8501 stream_.deviceBuffer,
8502 stream_.convertInfo[INPUT] );
// Advance the stream clock; honor a callback request to stop (return 1).
8507 MUTEX_UNLOCK( &stream_.mutex );
8508 RtApi::tickStreamTime();
8510 if ( doStopStream == 1 )
// Start a stopped Pulse stream: no server-side action is needed (the simple
// API streams on demand) — just mark the stream RUNNING and wake the parked
// callback thread via runnable_cv.
8514 void RtApiPulse::startStream( void )
8516 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8518 if ( stream_.state == STREAM_CLOSED ) {
8519 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8520 error( RtAudioError::INVALID_USE );
8523 if ( stream_.state == STREAM_RUNNING ) {
8524 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8525 error( RtAudioError::WARNING );
8529 MUTEX_LOCK( &stream_.mutex );
8531 stream_.state = STREAM_RUNNING;
8533 pah->runnable = true;
8534 pthread_cond_signal( &pah->runnable_cv );
8535 MUTEX_UNLOCK( &stream_.mutex );
// Stop a running Pulse stream gracefully: mark it STOPPED (so the callback
// thread parks on its next pass) and drain any queued playback audio so it is
// heard before the stream goes quiet.
8538 void RtApiPulse::stopStream( void )
8540 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8542 if ( stream_.state == STREAM_CLOSED ) {
8543 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8544 error( RtAudioError::INVALID_USE );
8547 if ( stream_.state == STREAM_STOPPED ) {
8548 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8549 error( RtAudioError::WARNING );
8553 stream_.state = STREAM_STOPPED;
8554 MUTEX_LOCK( &stream_.mutex );
8556 if ( pah && pah->s_play ) {
// pa_simple_drain blocks until all queued samples have played.
8558 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8559 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8560 pa_strerror( pa_error ) << ".";
8561 errorText_ = errorStream_.str();
// Unlock before raising so the error path does not hold the stream mutex.
8562 MUTEX_UNLOCK( &stream_.mutex );
8563 error( RtAudioError::SYSTEM_ERROR );
8568 stream_.state = STREAM_STOPPED;
8569 MUTEX_UNLOCK( &stream_.mutex );
// Abort a running Pulse stream immediately: identical to stopStream() except
// queued playback audio is flushed (discarded) instead of drained.
8572 void RtApiPulse::abortStream( void )
8574 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8576 if ( stream_.state == STREAM_CLOSED ) {
8577 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8578 error( RtAudioError::INVALID_USE );
8581 if ( stream_.state == STREAM_STOPPED ) {
8582 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8583 error( RtAudioError::WARNING );
8587 stream_.state = STREAM_STOPPED;
8588 MUTEX_LOCK( &stream_.mutex );
8590 if ( pah && pah->s_play ) {
// pa_simple_flush discards all audio currently buffered for playback.
8592 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8593 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8594 pa_strerror( pa_error ) << ".";
8595 errorText_ = errorStream_.str();
8596 MUTEX_UNLOCK( &stream_.mutex );
8597 error( RtAudioError::SYSTEM_ERROR );
8602 stream_.state = STREAM_STOPPED;
8603 MUTEX_UNLOCK( &stream_.mutex );
// Open one direction (INPUT or OUTPUT) of the single Pulse virtual device:
// validate parameters, choose a pa_sample_spec (falling back to internal
// FLOAT32 conversion for unmapped formats), allocate user/conversion buffers,
// connect via pa_simple_new, and spawn the callback thread on first open.
// Returns true on success; error paths (mostly elided here) report via
// errorText_ and jump to the cleanup code at the bottom.
8606 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8607 unsigned int channels, unsigned int firstChannel,
8608 unsigned int sampleRate, RtAudioFormat format,
8609 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8611 PulseAudioHandle *pah = 0;
8612 unsigned long bufferBytes = 0;
// Only device 0 exists; the simple API supports mono/stereo only and no
// channel offsets.
8615 if ( device != 0 ) return false;
8616 if ( mode != INPUT && mode != OUTPUT ) return false;
8617 if ( channels != 1 && channels != 2 ) {
8618 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8621 ss.channels = channels;
8623 if ( firstChannel != 0 ) return false;
// The requested rate must match one of the advertised SUPPORTED_SAMPLERATES.
8625 bool sr_found = false;
8626 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8627 if ( sampleRate == *sr ) {
8629 stream_.sampleRate = sampleRate;
8630 ss.rate = sampleRate;
8635 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Look up a native PA format for the requested RtAudio format.
8640 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8641 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8642 if ( format == sf->rtaudio_format ) {
8644 stream_.userFormat = sf->rtaudio_format;
8645 stream_.deviceFormat[mode] = stream_.userFormat;
8646 ss.format = sf->pa_format;
8650 if ( !sf_found ) { // Use internal data format conversion.
8651 stream_.userFormat = format;
8652 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8653 ss.format = PA_SAMPLE_FLOAT32LE;
8656 // Set other stream parameters.
8657 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8658 else stream_.userInterleaved = true;
8659 stream_.deviceInterleaved[mode] = true;
8660 stream_.nBuffers = 1;
8661 stream_.doByteSwap[mode] = false;
8662 stream_.nUserChannels[mode] = channels;
8663 stream_.nDeviceChannels[mode] = channels + firstChannel;
8664 stream_.channelOffset[mode] = 0;
8665 std::string streamName = "RtAudio";
8667 // Set flags for buffer conversion.
8668 stream_.doConvertBuffer[mode] = false;
8669 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8670 stream_.doConvertBuffer[mode] = true;
8671 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8672 stream_.doConvertBuffer[mode] = true;
8674 // Allocate necessary internal buffers.
8675 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8676 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8677 if ( stream_.userBuffer[mode] == NULL ) {
8678 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8681 stream_.bufferSize = *bufferSize;
// Device-format conversion buffer: reuse the existing one when opening the
// second (INPUT) half of a duplex stream if it is already large enough.
8683 if ( stream_.doConvertBuffer[mode] ) {
8685 bool makeBuffer = true;
8686 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8687 if ( mode == INPUT ) {
8688 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8689 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8690 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8695 bufferBytes *= *bufferSize;
8696 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8697 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8698 if ( stream_.deviceBuffer == NULL ) {
8699 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8705 stream_.device[mode] = device;
8707 // Setup the buffer conversion information structure.
8708 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Allocate the shared PulseAudioHandle on first open; reuse it otherwise.
8710 if ( !stream_.apiHandle ) {
8711 PulseAudioHandle *pah = new PulseAudioHandle;
8713 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8717 stream_.apiHandle = pah;
8718 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8719 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8723 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8726 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Record side only: limit the fragment size so reads return bufferBytes.
8729 pa_buffer_attr buffer_attr;
8730 buffer_attr.fragsize = bufferBytes;
8731 buffer_attr.maxlength = -1;
8733 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8734 if ( !pah->s_rec ) {
8735 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8740 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8741 if ( !pah->s_play ) {
8742 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// Track combined mode: first open sets it, opening the other direction
// afterwards promotes the stream to DUPLEX.
8750 if ( stream_.mode == UNINITIALIZED )
8751 stream_.mode = mode;
8752 else if ( stream_.mode == mode )
8755 stream_.mode = DUPLEX;
// Spawn the callback thread once (not when adding the second direction).
8757 if ( !stream_.callbackInfo.isRunning ) {
8758 stream_.callbackInfo.object = this;
8760 stream_.state = STREAM_STOPPED;
8761 // Set the thread attributes for joinable and realtime scheduling
8762 // priority (optional). The higher priority will only take affect
8763 // if the program is run as root or suid. Note, under Linux
8764 // processes with CAP_SYS_NICE privilege, a user can change
8765 // scheduling policy and priority (thus need not be root). See
8766 // POSIX "capabilities".
8767 pthread_attr_t attr;
8768 pthread_attr_init( &attr );
8769 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8770 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8771 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8772 stream_.callbackInfo.doRealtime = true;
8773 struct sched_param param;
// Clamp the requested priority into the valid SCHED_RR range.
8774 int priority = options->priority;
8775 int min = sched_get_priority_min( SCHED_RR );
8776 int max = sched_get_priority_max( SCHED_RR );
8777 if ( priority < min ) priority = min;
8778 else if ( priority > max ) priority = max;
8779 param.sched_priority = priority;
8781 // Set the policy BEFORE the priority. Otherwise it fails.
8782 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8783 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8784 // This is definitely required. Otherwise it fails.
8785 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): '¶m' below appears to be a mis-encoding of '&param'
// (same artifact exists in the ALSA section) — confirm against upstream
// RtAudio and repair the file's encoding.
8786 pthread_attr_setschedparam(&attr, ¶m);
8789 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8791 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8794 stream_.callbackInfo.isRunning = true;
8795 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8796 pthread_attr_destroy(&attr);
8798 // Failed. Try instead with default attributes.
8799 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8801 stream_.callbackInfo.isRunning = false;
8802 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// ---- Error cleanup: release the handle, buffers, and reset stream state.
8811 if ( pah && stream_.callbackInfo.isRunning ) {
8812 pthread_cond_destroy( &pah->runnable_cv );
8814 stream_.apiHandle = 0;
8817 for ( int i=0; i<2; i++ ) {
8818 if ( stream_.userBuffer[i] ) {
8819 free( stream_.userBuffer[i] );
8820 stream_.userBuffer[i] = 0;
8824 if ( stream_.deviceBuffer ) {
8825 free( stream_.deviceBuffer );
8826 stream_.deviceBuffer = 0;
8829 stream_.state = STREAM_CLOSED;
8833 //******************** End of __LINUX_PULSE__ *********************//
8836 #if defined(__LINUX_OSS__)
8839 #include <sys/ioctl.h>
8842 #include <sys/soundcard.h>
// Forward declaration of the OSS callback-thread entry point.
8846 static void *ossCallbackHandler(void * ptr);
8848 // A structure to hold various information related to the OSS API
// Per-stream OSS state. NOTE(review): the struct's opening declaration and
// some members (triggered flag, xrun flags) are elided in this extraction;
// the constructor's init list below shows the full field set.
8851 int id[2]; // device ids
8854 pthread_cond_t runnable;
8857 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — no OSS resources are acquired until a stream opens.
8860 RtApiOss :: RtApiOss()
8862 // Nothing to do here.
// Destructor: close the stream if the user left it open.
8865 RtApiOss :: ~RtApiOss()
8867 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count OSS audio devices by querying SNDCTL_SYSINFO through /dev/mixer
// (requires OSS >= 4.0). Error paths issue a WARNING; the corresponding
// close(mixerfd) calls are on lines elided in this extraction — confirm the
// descriptor is closed on every path upstream.
8870 unsigned int RtApiOss :: getDeviceCount( void )
8872 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8873 if ( mixerfd == -1 ) {
8874 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8875 error( RtAudioError::WARNING );
8879 oss_sysinfo sysinfo;
8880 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8882 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8883 error( RtAudioError::WARNING );
8888 return sysinfo.numaudios;
// Probes a single OSS device (index 'device') and fills an RtAudio::DeviceInfo
// with its channel capabilities, supported native sample formats and sample
// rates. All failures are reported as WARNING (or INVALID_USE for bad
// arguments) and info.probed presumably remains false on those paths.
// NOTE(review): numbering gaps show early-return and close(mixerfd) lines are
// missing from this extract; also the line setting info.probed = true and the
// final return do not appear. Verify against upstream RtAudio.
8891 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8893 RtAudio::DeviceInfo info;
8894 info.probed = false;
8896 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8897 if ( mixerfd == -1 ) {
8898 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8899 error( RtAudioError::WARNING );
8903 oss_sysinfo sysinfo;
8904 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8905 if ( result == -1 ) {
8907 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8908 error( RtAudioError::WARNING );
8912 unsigned nDevices = sysinfo.numaudios;
8913 if ( nDevices == 0 ) {
8915 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8916 error( RtAudioError::INVALID_USE );
8920 if ( device >= nDevices ) {
8922 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8923 error( RtAudioError::INVALID_USE );
// Query per-device capabilities (channels, formats, rates) via
// SNDCTL_AUDIOINFO. The line assigning ainfo.dev = device appears to be
// missing from this extract (numbering gap).
8927 oss_audioinfo ainfo;
8929 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8931 if ( result == -1 ) {
8932 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8933 errorText_ = errorStream_.str();
8934 error( RtAudioError::WARNING );
// Channel capabilities: duplexChannels is the min of in/out counts when the
// device advertises PCM_CAP_DUPLEX (the inner PCM_CAP_DUPLEX re-test is
// redundant — the outer if already checked it).
8939 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8940 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8941 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8942 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8943 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Map OSS AFMT_* bits to RtAudio native-format flags (input formats only,
// per the comment below).
8946 // Probe data formats ... do for input
8947 unsigned long mask = ainfo.iformats;
8948 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8949 info.nativeFormats |= RTAUDIO_SINT16;
8950 if ( mask & AFMT_S8 )
8951 info.nativeFormats |= RTAUDIO_SINT8;
8952 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8953 info.nativeFormats |= RTAUDIO_SINT32;
8955 if ( mask & AFMT_FLOAT )
8956 info.nativeFormats |= RTAUDIO_FLOAT32;
8958 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8959 info.nativeFormats |= RTAUDIO_SINT24;
8961 // Check that we have at least one supported format
8962 if ( info.nativeFormats == 0 ) {
8963 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8964 errorText_ = errorStream_.str();
8965 error( RtAudioError::WARNING );
// Sample rates: if the device enumerates explicit rates (ainfo.nrates),
// intersect them with RtAudio's SAMPLE_RATES table; otherwise accept every
// table rate inside [min_rate, max_rate]. The preferred rate is the highest
// supported rate <= 48000 Hz.
8969 // Probe the supported sample rates.
8970 info.sampleRates.clear();
8971 if ( ainfo.nrates ) {
8972 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8973 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8974 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8975 info.sampleRates.push_back( SAMPLE_RATES[k] );
8977 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8978 info.preferredSampleRate = SAMPLE_RATES[k];
8986 // Check min and max rate values;
8987 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8988 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8989 info.sampleRates.push_back( SAMPLE_RATES[k] );
8991 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8992 info.preferredSampleRate = SAMPLE_RATES[k];
8997 if ( info.sampleRates.size() == 0 ) {
8998 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8999 errorText_ = errorStream_.str();
9000 error( RtAudioError::WARNING );
9004 info.name = ainfo.name;
// Opens (or re-opens for duplex) an OSS device for the given stream mode and
// configures channels, sample format, fragment/buffer size, sample rate,
// conversion flags, internal buffers and the realtime callback thread.
// Returns true on success; on failure jumps to the cleanup section at the
// bottom (labels themselves are not visible in this extract) which tears down
// the handle, buffers and sets STREAM_CLOSED.
// NOTE(review): numbering gaps throughout indicate many lines (returns, goto
// labels, closing braces, some assignments) are missing from this copy;
// verify any edit against upstream RtAudio before relying on this text.
9011 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
9012 unsigned int firstChannel, unsigned int sampleRate,
9013 RtAudioFormat format, unsigned int *bufferSize,
9014 RtAudio::StreamOptions *options )
9016 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
9017 if ( mixerfd == -1 ) {
9018 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
9022 oss_sysinfo sysinfo;
9023 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
9024 if ( result == -1 ) {
9026 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
9030 unsigned nDevices = sysinfo.numaudios;
9031 if ( nDevices == 0 ) {
9032 // This should not happen because a check is made before this function is called.
9034 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
9038 if ( device >= nDevices ) {
9039 // This should not happen because a check is made before this function is called.
9041 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
9045 oss_audioinfo ainfo;
9047 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
9049 if ( result == -1 ) {
// NOTE(review): message says "getDeviceInfo" but this is probeDeviceOpen —
// looks like a copy/paste slip in the error text (left unchanged here).
9050 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
9051 errorText_ = errorStream_.str();
9055 // Check if device supports input or output
9056 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
9057 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
9058 if ( mode == OUTPUT )
9059 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
9061 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
9062 errorText_ = errorStream_.str();
// OSS duplex handling: if the same device was already opened for output and
// is now requested for input, close the playback fd and reopen O_RDWR below.
// Channel counts must match between the two directions for OSS duplex.
9067 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9068 if ( mode == OUTPUT )
9070 else { // mode == INPUT
9071 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
9072 // We just set the same device for playback ... close and reopen for duplex (OSS only).
9073 close( handle->id[0] );
9075 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
9076 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
9077 errorText_ = errorStream_.str();
9080 // Check that the number previously set channels is the same.
9081 if ( stream_.nUserChannels[0] != channels ) {
9082 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
9083 errorText_ = errorStream_.str();
9092 // Set exclusive access if specified.
9093 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
9095 // Try to open the device.
9097 fd = open( ainfo.devnode, flags, 0 );
9099 if ( errno == EBUSY )
9100 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
9102 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
9103 errorText_ = errorStream_.str();
9107 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is a bitwise OR and is always non-zero, so
// this condition is always true; the intent was almost certainly
// `flags == O_RDWR` (or `(flags & O_RDWR) == O_RDWR`). Known upstream quirk —
// the author's comment above concedes it "doesn't seem to work".
9109 if ( flags | O_RDWR ) {
9110 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
9111 if ( result == -1) {
9112 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
9113 errorText_ = errorStream_.str();
9119 // Check the device channel support.
9120 stream_.nUserChannels[mode] = channels;
9121 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
9123 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
9124 errorText_ = errorStream_.str();
9128 // Set the number of channels.
9129 int deviceChannels = channels + firstChannel;
9130 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
9131 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
9133 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
9134 errorText_ = errorStream_.str();
9137 stream_.nDeviceChannels[mode] = deviceChannels;
9139 // Get the data format mask
9141 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
9142 if ( result == -1 ) {
9144 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
9145 errorText_ = errorStream_.str();
// Format negotiation: first try the user's requested format natively
// (preferring native-endian, falling back to opposite-endian with a byte
// swap); if unsupported, fall back through S16/S32/S24/S8 in that order.
9149 // Determine how to set the device format.
9150 stream_.userFormat = format;
9151 int deviceFormat = -1;
9152 stream_.doByteSwap[mode] = false;
9153 if ( format == RTAUDIO_SINT8 ) {
9154 if ( mask & AFMT_S8 ) {
9155 deviceFormat = AFMT_S8;
9156 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9159 else if ( format == RTAUDIO_SINT16 ) {
9160 if ( mask & AFMT_S16_NE ) {
9161 deviceFormat = AFMT_S16_NE;
9162 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9164 else if ( mask & AFMT_S16_OE ) {
9165 deviceFormat = AFMT_S16_OE;
9166 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9167 stream_.doByteSwap[mode] = true;
9170 else if ( format == RTAUDIO_SINT24 ) {
9171 if ( mask & AFMT_S24_NE ) {
9172 deviceFormat = AFMT_S24_NE;
9173 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9175 else if ( mask & AFMT_S24_OE ) {
9176 deviceFormat = AFMT_S24_OE;
9177 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9178 stream_.doByteSwap[mode] = true;
9181 else if ( format == RTAUDIO_SINT32 ) {
9182 if ( mask & AFMT_S32_NE ) {
9183 deviceFormat = AFMT_S32_NE;
9184 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9186 else if ( mask & AFMT_S32_OE ) {
9187 deviceFormat = AFMT_S32_OE;
9188 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9189 stream_.doByteSwap[mode] = true;
9193 if ( deviceFormat == -1 ) {
9194 // The user requested format is not natively supported by the device.
9195 if ( mask & AFMT_S16_NE ) {
9196 deviceFormat = AFMT_S16_NE;
9197 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9199 else if ( mask & AFMT_S32_NE ) {
9200 deviceFormat = AFMT_S32_NE;
9201 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9203 else if ( mask & AFMT_S24_NE ) {
9204 deviceFormat = AFMT_S24_NE;
9205 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9207 else if ( mask & AFMT_S16_OE ) {
9208 deviceFormat = AFMT_S16_OE;
9209 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9210 stream_.doByteSwap[mode] = true;
9212 else if ( mask & AFMT_S32_OE ) {
9213 deviceFormat = AFMT_S32_OE;
9214 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9215 stream_.doByteSwap[mode] = true;
9217 else if ( mask & AFMT_S24_OE ) {
9218 deviceFormat = AFMT_S24_OE;
9219 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9220 stream_.doByteSwap[mode] = true;
9222 else if ( mask & AFMT_S8) {
9223 deviceFormat = AFMT_S8;
9224 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9228 if ( stream_.deviceFormat[mode] == 0 ) {
9229 // This really shouldn't happen ...
9231 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9232 errorText_ = errorStream_.str();
9236 // Set the data format.
9237 int temp = deviceFormat;
9238 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
9239 if ( result == -1 || deviceFormat != temp ) {
9241 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9242 errorText_ = errorStream_.str();
9246 // Attempt to set the buffer size. According to OSS, the minimum
9247 // number of buffers is two. The supposed minimum buffer size is 16
9248 // bytes, so that will be our lower bound. The argument to this
9249 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9250 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9251 // We'll check the actual value used near the end of the setup
9253 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9254 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9256 if ( options ) buffers = options->numberOfBuffers;
9257 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9258 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) computes log2(ossBufferBytes) to get the SSSS exponent.
9259 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9260 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9261 if ( result == -1 ) {
9263 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9264 errorText_ = errorStream_.str();
9267 stream_.nBuffers = buffers;
9269 // Save buffer size (in sample frames).
9270 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9271 stream_.bufferSize = *bufferSize;
9273 // Set the sample rate.
9274 int srate = sampleRate;
9275 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9276 if ( result == -1 ) {
9278 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9279 errorText_ = errorStream_.str();
// Accept the negotiated rate if it is within 100 Hz of the request.
9283 // Verify the sample rate setup worked.
9284 if ( abs( srate - (int)sampleRate ) > 100 ) {
9286 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9287 errorText_ = errorStream_.str();
9290 stream_.sampleRate = sampleRate;
9292 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9293 // We're doing duplex setup here.
9294 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9295 stream_.nDeviceChannels[0] = deviceChannels;
9298 // Set interleaving parameters.
9299 stream_.userInterleaved = true;
9300 stream_.deviceInterleaved[mode] = true;
9301 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9302 stream_.userInterleaved = false;
9304 // Set flags for buffer conversion
9305 stream_.doConvertBuffer[mode] = false;
9306 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9307 stream_.doConvertBuffer[mode] = true;
9308 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9309 stream_.doConvertBuffer[mode] = true;
9310 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9311 stream_.nUserChannels[mode] > 1 )
9312 stream_.doConvertBuffer[mode] = true;
9314 // Allocate the stream handles if necessary and then save.
9315 if ( stream_.apiHandle == 0 ) {
9317 handle = new OssHandle;
9319 catch ( std::bad_alloc& ) {
9320 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9324 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9325 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9329 stream_.apiHandle = (void *) handle;
9332 handle = (OssHandle *) stream_.apiHandle;
9334 handle->id[mode] = fd;
9336 // Allocate necessary internal buffers.
9337 unsigned long bufferBytes;
9338 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9339 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9340 if ( stream_.userBuffer[mode] == NULL ) {
9341 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
// Device-side conversion buffer: for duplex, reuse the output-side buffer
// when it is already at least as large as what input needs.
9345 if ( stream_.doConvertBuffer[mode] ) {
9347 bool makeBuffer = true;
9348 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9349 if ( mode == INPUT ) {
9350 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9351 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9352 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9357 bufferBytes *= *bufferSize;
9358 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9359 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9360 if ( stream_.deviceBuffer == NULL ) {
9361 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9367 stream_.device[mode] = device;
9368 stream_.state = STREAM_STOPPED;
9370 // Setup the buffer conversion information structure.
9371 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9373 // Setup thread if necessary.
9374 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9375 // We had already set up an output stream.
9376 stream_.mode = DUPLEX;
9377 if ( stream_.device[0] == device ) handle->id[0] = fd;
9380 stream_.mode = mode;
9382 // Setup callback thread.
9383 stream_.callbackInfo.object = (void *) this;
9385 // Set the thread attributes for joinable and realtime scheduling
9386 // priority. The higher priority will only take affect if the
9387 // program is run as root or suid.
9388 pthread_attr_t attr;
9389 pthread_attr_init( &attr );
9390 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9391 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9392 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9393 stream_.callbackInfo.doRealtime = true;
9394 struct sched_param param;
9395 int priority = options->priority;
9396 int min = sched_get_priority_min( SCHED_RR );
9397 int max = sched_get_priority_max( SCHED_RR );
9398 if ( priority < min ) priority = min;
9399 else if ( priority > max ) priority = max;
9400 param.sched_priority = priority;
9402 // Set the policy BEFORE the priority. Otherwise it fails.
9403 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9404 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9405 // This is definitely required. Otherwise it fails.
9406 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below is mojibake — upstream RtAudio passes "&param"
// here; the '&' entity was corrupted to a pilcrow during extraction. This
// line will not compile as-is and must be restored to &param.
9407 pthread_attr_setschedparam(&attr, ¶m);
9410 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9412 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9415 stream_.callbackInfo.isRunning = true;
9416 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9417 pthread_attr_destroy( &attr );
9419 // Failed. Try instead with default attributes.
9420 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9422 stream_.callbackInfo.isRunning = false;
9423 errorText_ = "RtApiOss::error creating callback thread!";
// Error-path cleanup: destroy the condition variable, close any open device
// fds, free the handle and all user/device buffers, and mark the stream
// closed. (The error label line itself is not visible in this extract.)
9433 pthread_cond_destroy( &handle->runnable );
9434 if ( handle->id[0] ) close( handle->id[0] );
9435 if ( handle->id[1] ) close( handle->id[1] );
9437 stream_.apiHandle = 0;
9440 for ( int i=0; i<2; i++ ) {
9441 if ( stream_.userBuffer[i] ) {
9442 free( stream_.userBuffer[i] );
9443 stream_.userBuffer[i] = 0;
9447 if ( stream_.deviceBuffer ) {
9448 free( stream_.deviceBuffer );
9449 stream_.deviceBuffer = 0;
9452 stream_.state = STREAM_CLOSED;
// Closes the open stream: signals and joins the callback thread, halts any
// running DSP transfers, destroys the condition variable, closes the device
// file descriptors, frees the handle and buffers, and resets stream state to
// UNINITIALIZED/STREAM_CLOSED. Warns (and returns, per the numbering gap) if
// no stream is open.
9456 void RtApiOss :: closeStream()
9458 if ( stream_.state == STREAM_CLOSED ) {
9459 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9460 error( RtAudioError::WARNING );
// Wake the callback thread if it is blocked waiting on 'runnable' (stream
// stopped), then join it before tearing anything down.
9464 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9465 stream_.callbackInfo.isRunning = false;
9466 MUTEX_LOCK( &stream_.mutex );
9467 if ( stream_.state == STREAM_STOPPED )
9468 pthread_cond_signal( &handle->runnable );
9469 MUTEX_UNLOCK( &stream_.mutex );
9470 pthread_join( stream_.callbackInfo.thread, NULL );
9472 if ( stream_.state == STREAM_RUNNING ) {
9473 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9474 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9476 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9477 stream_.state = STREAM_STOPPED;
9481 pthread_cond_destroy( &handle->runnable );
9482 if ( handle->id[0] ) close( handle->id[0] );
9483 if ( handle->id[1] ) close( handle->id[1] );
9485 stream_.apiHandle = 0;
9488 for ( int i=0; i<2; i++ ) {
9489 if ( stream_.userBuffer[i] ) {
9490 free( stream_.userBuffer[i] );
9491 stream_.userBuffer[i] = 0;
9495 if ( stream_.deviceBuffer ) {
9496 free( stream_.deviceBuffer );
9497 stream_.deviceBuffer = 0;
9500 stream_.mode = UNINITIALIZED;
9501 stream_.state = STREAM_CLOSED;
// Starts a stopped stream. OSS begins transfer automatically once samples
// are written/read, so this only flips the state under the mutex and wakes
// the callback thread, which is blocked on 'runnable' in callbackEvent().
9504 void RtApiOss :: startStream()
9507 if ( stream_.state == STREAM_RUNNING ) {
9508 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9509 error( RtAudioError::WARNING );
9513 MUTEX_LOCK( &stream_.mutex );
9515 stream_.state = STREAM_RUNNING;
9517 // No need to do anything else here ... OSS automatically starts
9518 // when fed samples.
9520 MUTEX_UNLOCK( &stream_.mutex );
9522 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9523 pthread_cond_signal( &handle->runnable );
// Stops a running stream gracefully: flushes the output by writing
// nBuffers+1 buffers of silence (so queued audio drains without a click),
// then issues SNDCTL_DSP_HALT on the output and, if it is a separate fd, the
// input device. Raises SYSTEM_ERROR if any ioctl failed.
// NOTE(review): the declaration/initialization of 'result' and several
// closing braces fall in this extract's numbering gaps.
9526 void RtApiOss :: stopStream()
9529 if ( stream_.state == STREAM_STOPPED ) {
9530 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9531 error( RtAudioError::WARNING );
9535 MUTEX_LOCK( &stream_.mutex );
9537 // The state might change while waiting on a mutex.
9538 if ( stream_.state == STREAM_STOPPED ) {
9539 MUTEX_UNLOCK( &stream_.mutex );
9544 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9545 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9547 // Flush the output with zeros a few times.
// Pick whichever buffer the device actually consumes (converted device
// buffer vs. raw user buffer) so the silence matches the device format.
9550 RtAudioFormat format;
9552 if ( stream_.doConvertBuffer[0] ) {
9553 buffer = stream_.deviceBuffer;
9554 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9555 format = stream_.deviceFormat[0];
9558 buffer = stream_.userBuffer[0];
9559 samples = stream_.bufferSize * stream_.nUserChannels[0];
9560 format = stream_.userFormat;
9563 memset( buffer, 0, samples * formatBytes(format) );
9564 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9565 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9566 if ( result == -1 ) {
9567 errorText_ = "RtApiOss::stopStream: audio write error.";
9568 error( RtAudioError::WARNING );
9572 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9573 if ( result == -1 ) {
9574 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9575 errorText_ = errorStream_.str();
9578 handle->triggered = false;
9581 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9582 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9583 if ( result == -1 ) {
// NOTE(review): message reports device[0] but this is the input path —
// device[1] was presumably intended (left unchanged here).
9584 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9585 errorText_ = errorStream_.str();
9591 stream_.state = STREAM_STOPPED;
9592 MUTEX_UNLOCK( &stream_.mutex );
9594 if ( result != -1 ) return;
9595 error( RtAudioError::SYSTEM_ERROR );
// Stops a running stream immediately (no silence flush, unlike stopStream):
// halts the output and, if separate, the input device with SNDCTL_DSP_HALT.
// Raises SYSTEM_ERROR if any ioctl failed.
9598 void RtApiOss :: abortStream()
9601 if ( stream_.state == STREAM_STOPPED ) {
9602 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9603 error( RtAudioError::WARNING );
9607 MUTEX_LOCK( &stream_.mutex );
9609 // The state might change while waiting on a mutex.
9610 if ( stream_.state == STREAM_STOPPED ) {
9611 MUTEX_UNLOCK( &stream_.mutex );
9616 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9617 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9618 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9619 if ( result == -1 ) {
9620 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9621 errorText_ = errorStream_.str();
9624 handle->triggered = false;
9627 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9628 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9629 if ( result == -1 ) {
// NOTE(review): reports device[0] on the input path; device[1] presumably
// intended (left unchanged here).
9630 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9631 errorText_ = errorStream_.str();
9637 stream_.state = STREAM_STOPPED;
9638 MUTEX_UNLOCK( &stream_.mutex );
9640 if ( result != -1 ) return;
9641 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the callback thread's duty cycle: waits on 'runnable'
// while stopped, invokes the user callback (reporting any xrun flags),
// performs output conversion/byte-swap and write(), then input read(),
// byte-swap and conversion, ticks the stream time, and honors the callback's
// stop/abort return codes (1 = stopStream, 2 = abortStream).
9644 void RtApiOss :: callbackEvent()
9646 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9647 if ( stream_.state == STREAM_STOPPED ) {
9648 MUTEX_LOCK( &stream_.mutex );
// Block here until startStream()/closeStream() signals; re-check state on
// wakeup to guard against spurious or shutdown signals.
9649 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9650 if ( stream_.state != STREAM_RUNNING ) {
9651 MUTEX_UNLOCK( &stream_.mutex );
9654 MUTEX_UNLOCK( &stream_.mutex );
9657 if ( stream_.state == STREAM_CLOSED ) {
9658 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9659 error( RtAudioError::WARNING );
9663 // Invoke user callback to get fresh output data.
9664 int doStopStream = 0;
9665 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9666 double streamTime = getStreamTime();
9667 RtAudioStreamStatus status = 0;
9668 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9669 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9670 handle->xrun[0] = false;
9672 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9673 status |= RTAUDIO_INPUT_OVERFLOW;
9674 handle->xrun[1] = false;
9676 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9677 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9678 if ( doStopStream == 2 ) {
9679 this->abortStream();
9683 MUTEX_LOCK( &stream_.mutex );
9685 // The state might change while waiting on a mutex.
9686 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9691 RtAudioFormat format;
9693 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9695 // Setup parameters and do buffer conversion if necessary.
9696 if ( stream_.doConvertBuffer[0] ) {
9697 buffer = stream_.deviceBuffer;
9698 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9699 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9700 format = stream_.deviceFormat[0];
9703 buffer = stream_.userBuffer[0];
9704 samples = stream_.bufferSize * stream_.nUserChannels[0];
9705 format = stream_.userFormat;
9708 // Do byte swapping if necessary.
9709 if ( stream_.doByteSwap[0] )
9710 byteSwapBuffer( buffer, samples, format );
// Single-fd duplex: on the first pass, prime the device with one buffer
// while triggers are disabled, then enable input+output simultaneously so
// capture and playback start in sync.
9712 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9714 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9715 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9716 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9717 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9718 handle->triggered = true;
9721 // Write samples to device.
9722 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9724 if ( result == -1 ) {
9725 // We'll assume this is an underrun, though there isn't a
9726 // specific means for determining that.
9727 handle->xrun[0] = true;
9728 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9729 error( RtAudioError::WARNING );
9730 // Continue on to input section.
9734 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9736 // Setup parameters.
9737 if ( stream_.doConvertBuffer[1] ) {
9738 buffer = stream_.deviceBuffer;
9739 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9740 format = stream_.deviceFormat[1];
9743 buffer = stream_.userBuffer[1];
9744 samples = stream_.bufferSize * stream_.nUserChannels[1];
9745 format = stream_.userFormat;
9748 // Read samples from device.
9749 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9751 if ( result == -1 ) {
9752 // We'll assume this is an overrun, though there isn't a
9753 // specific means for determining that.
9754 handle->xrun[1] = true;
9755 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9756 error( RtAudioError::WARNING );
9760 // Do byte swapping if necessary.
9761 if ( stream_.doByteSwap[1] )
9762 byteSwapBuffer( buffer, samples, format );
9764 // Do buffer conversion if necessary.
9765 if ( stream_.doConvertBuffer[1] )
9766 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// 'unlock' label target (the label line itself falls in this extract's
// numbering gap).
9770 MUTEX_UNLOCK( &stream_.mutex );
9772 RtApi::tickStreamTime();
9773 if ( doStopStream == 1 ) this->stopStream();
// Callback thread entry point. Unpacks the CallbackInfo, optionally logs
// whether SCHED_RR realtime scheduling actually took effect, then spins on
// callbackEvent() until isRunning is cleared by closeStream()/error().
// pthread_testcancel() provides a cancellation point each iteration.
9776 static void *ossCallbackHandler( void *ptr )
9778 CallbackInfo *info = (CallbackInfo *) ptr;
9779 RtApiOss *object = (RtApiOss *) info->object;
9780 bool *isRunning = &info->isRunning;
9782 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9783 if (info->doRealtime) {
9784 std::cerr << "RtAudio oss: " <<
9785 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9786 "running realtime scheduling" << std::endl;
// NOTE(review): the matching #endif falls in this extract's numbering gap.
9790 while ( *isRunning == true ) {
9791 pthread_testcancel();
9792 object->callbackEvent();
9795 pthread_exit( NULL );
9798 //******************** End of __LINUX_OSS__ *********************//
9802 // *************************************************** //
9804 // Protected common (OS-independent) RtAudio methods.
9806 // *************************************************** //
9808 // This method can be modified to control the behavior of error
9809 // message printing.
// Central error dispatch. If the user registered an error callback, the
// first error is forwarded to it (re-entrant errors from abortStream() are
// suppressed via firstErrorOccurred_), and a non-warning error also stops
// the callback thread before aborting the stream. Without a callback,
// warnings are printed to stderr (when enabled) and real errors throw
// RtAudioError.
9810 void RtApi :: error( RtAudioError::Type type )
9812 errorStream_.str(""); // clear the ostringstream
9814 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9815 if ( errorCallback ) {
9816 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9818 if ( firstErrorOccurred_ )
9821 firstErrorOccurred_ = true;
// Copy errorText_ now: abortStream() below may overwrite it before the
// callback runs.
9822 const std::string errorMessage = errorText_;
9824 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9825 stream_.callbackInfo.isRunning = false; // exit from the thread
9829 errorCallback( type, errorMessage );
9830 firstErrorOccurred_ = false;
9834 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9835 std::cerr << '\n' << errorText_ << "\n\n";
9836 else if ( type != RtAudioError::WARNING )
9837 throw( RtAudioError( errorText_, type ) );
// Guard used by public stream methods: raises INVALID_USE (which throws,
// see error()) when no stream is currently open.
9840 void RtApi :: verifyStream()
9842 if ( stream_.state == STREAM_CLOSED ) {
9843 errorText_ = "RtApi:: a stream is not open!";
9844 error( RtAudioError::INVALID_USE );
// Resets every field of the shared RtApiStream structure to its pristine
// state, for both the output (index 0) and input (index 1) directions.
// Called before probing/opening a stream and after closing one.
9848 void RtApi :: clearStreamInfo()
9850 stream_.mode = UNINITIALIZED;
9851 stream_.state = STREAM_CLOSED;
9852 stream_.sampleRate = 0;
9853 stream_.bufferSize = 0;
9854 stream_.nBuffers = 0;
9855 stream_.userFormat = 0;
9856 stream_.userInterleaved = true;
9857 stream_.streamTime = 0.0;
9858 stream_.apiHandle = 0;
9859 stream_.deviceBuffer = 0;
9860 stream_.callbackInfo.callback = 0;
9861 stream_.callbackInfo.userData = 0;
9862 stream_.callbackInfo.isRunning = false;
9863 stream_.callbackInfo.errorCallback = 0;
// 11111 is a sentinel "no device" id used elsewhere in RtAudio.
9864 for ( int i=0; i<2; i++ ) {
9865 stream_.device[i] = 11111;
9866 stream_.doConvertBuffer[i] = false;
9867 stream_.deviceInterleaved[i] = true;
9868 stream_.doByteSwap[i] = false;
9869 stream_.nUserChannels[i] = 0;
9870 stream_.nDeviceChannels[i] = 0;
9871 stream_.channelOffset[i] = 0;
9872 stream_.deviceFormat[i] = 0;
9873 stream_.latency[i] = 0;
9874 stream_.userBuffer[i] = 0;
9875 stream_.convertInfo[i].channels = 0;
9876 stream_.convertInfo[i].inJump = 0;
9877 stream_.convertInfo[i].outJump = 0;
9878 stream_.convertInfo[i].inFormat = 0;
9879 stream_.convertInfo[i].outFormat = 0;
9880 stream_.convertInfo[i].inOffset.clear();
9881 stream_.convertInfo[i].outOffset.clear();
// Returns the size in bytes of one sample of the given RtAudioFormat
// (2 for S16, 4 for S32/FLOAT32, 8 for FLOAT64, 4 for S24 packed in 32 bits,
// 1 for S8). Unknown formats produce a WARNING.
// NOTE(review): the actual 'return N;' statements fall in this extract's
// numbering gaps — only the format tests are visible here.
9885 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9887 if ( format == RTAUDIO_SINT16 )
9889 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9891 else if ( format == RTAUDIO_FLOAT64 )
9893 else if ( format == RTAUDIO_SINT24 )
9895 else if ( format == RTAUDIO_SINT8 )
9898 errorText_ = "RtApi::formatBytes: undefined format.";
9899 error( RtAudioError::WARNING );
// Builds stream_.convertInfo[mode] for convertBuffer(): the in/out sample
// jumps (stride between frames), formats, channel count (min of user/device
// channels), and per-channel in/out offsets that implement interleaving,
// deinterleaving, and the firstChannel offset. For INPUT the conversion is
// device->user; for OUTPUT it is user->device.
9904 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9906 if ( mode == INPUT ) { // convert device to user buffer
9907 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9908 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9909 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9910 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9912 else { // convert user to device buffer
9913 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9914 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9915 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9916 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Only convert as many channels as both sides have.
9919 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9920 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9922 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
// Interleaved data advances by 'jump' samples per frame with offset k per
// channel; non-interleaved data is planar: offset k*bufferSize, jump 1.
9924 // Set up the interleave/deinterleave offsets.
9925 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9926 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9927 ( mode == INPUT && stream_.userInterleaved ) ) {
9928 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9929 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9930 stream_.convertInfo[mode].outOffset.push_back( k );
9931 stream_.convertInfo[mode].inJump = 1;
9935 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9936 stream_.convertInfo[mode].inOffset.push_back( k );
9937 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9938 stream_.convertInfo[mode].outJump = 1;
9942 else { // no (de)interleaving
9943 if ( stream_.userInterleaved ) {
9944 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9945 stream_.convertInfo[mode].inOffset.push_back( k );
9946 stream_.convertInfo[mode].outOffset.push_back( k );
9950 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9951 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9952 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9953 stream_.convertInfo[mode].inJump = 1;
9954 stream_.convertInfo[mode].outJump = 1;
// firstChannel shifts the device-side offsets: by channel index when the
// device buffer is interleaved, by whole planes otherwise.
9959 // Add channel offset.
9960 if ( firstChannel > 0 ) {
9961 if ( stream_.deviceInterleaved[mode] ) {
9962 if ( mode == OUTPUT ) {
9963 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9964 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9967 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9968 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9972 if ( mode == OUTPUT ) {
9973 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9974 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9977 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9978 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9984 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9986 // This function does format conversion, input/output channel compensation, and
9987 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9988 // the lower three bytes of a 32-bit integer.
9990 // Clear our device buffer when in/out duplex device channels are different
9991 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9992 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9993 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9996 if (info.outFormat == RTAUDIO_FLOAT64) {
9998 Float64 *out = (Float64 *)outBuffer;
10000 if (info.inFormat == RTAUDIO_SINT8) {
10001 signed char *in = (signed char *)inBuffer;
10002 scale = 1.0 / 127.5;
10003 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10004 for (j=0; j<info.channels; j++) {
10005 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10006 out[info.outOffset[j]] += 0.5;
10007 out[info.outOffset[j]] *= scale;
10010 out += info.outJump;
10013 else if (info.inFormat == RTAUDIO_SINT16) {
10014 Int16 *in = (Int16 *)inBuffer;
10015 scale = 1.0 / 32767.5;
10016 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10017 for (j=0; j<info.channels; j++) {
10018 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10019 out[info.outOffset[j]] += 0.5;
10020 out[info.outOffset[j]] *= scale;
10023 out += info.outJump;
10026 else if (info.inFormat == RTAUDIO_SINT24) {
10027 Int24 *in = (Int24 *)inBuffer;
10028 scale = 1.0 / 8388607.5;
10029 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10030 for (j=0; j<info.channels; j++) {
10031 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
10032 out[info.outOffset[j]] += 0.5;
10033 out[info.outOffset[j]] *= scale;
10036 out += info.outJump;
10039 else if (info.inFormat == RTAUDIO_SINT32) {
10040 Int32 *in = (Int32 *)inBuffer;
10041 scale = 1.0 / 2147483647.5;
10042 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10043 for (j=0; j<info.channels; j++) {
10044 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10045 out[info.outOffset[j]] += 0.5;
10046 out[info.outOffset[j]] *= scale;
10049 out += info.outJump;
10052 else if (info.inFormat == RTAUDIO_FLOAT32) {
10053 Float32 *in = (Float32 *)inBuffer;
10054 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10055 for (j=0; j<info.channels; j++) {
10056 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
10059 out += info.outJump;
10062 else if (info.inFormat == RTAUDIO_FLOAT64) {
10063 // Channel compensation and/or (de)interleaving only.
10064 Float64 *in = (Float64 *)inBuffer;
10065 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10066 for (j=0; j<info.channels; j++) {
10067 out[info.outOffset[j]] = in[info.inOffset[j]];
10070 out += info.outJump;
10074 else if (info.outFormat == RTAUDIO_FLOAT32) {
10076 Float32 *out = (Float32 *)outBuffer;
10078 if (info.inFormat == RTAUDIO_SINT8) {
10079 signed char *in = (signed char *)inBuffer;
10080 scale = (Float32) ( 1.0 / 127.5 );
10081 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10082 for (j=0; j<info.channels; j++) {
10083 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10084 out[info.outOffset[j]] += 0.5;
10085 out[info.outOffset[j]] *= scale;
10088 out += info.outJump;
10091 else if (info.inFormat == RTAUDIO_SINT16) {
10092 Int16 *in = (Int16 *)inBuffer;
10093 scale = (Float32) ( 1.0 / 32767.5 );
10094 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10095 for (j=0; j<info.channels; j++) {
10096 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10097 out[info.outOffset[j]] += 0.5;
10098 out[info.outOffset[j]] *= scale;
10101 out += info.outJump;
10104 else if (info.inFormat == RTAUDIO_SINT24) {
10105 Int24 *in = (Int24 *)inBuffer;
10106 scale = (Float32) ( 1.0 / 8388607.5 );
10107 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10108 for (j=0; j<info.channels; j++) {
10109 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
10110 out[info.outOffset[j]] += 0.5;
10111 out[info.outOffset[j]] *= scale;
10114 out += info.outJump;
10117 else if (info.inFormat == RTAUDIO_SINT32) {
10118 Int32 *in = (Int32 *)inBuffer;
10119 scale = (Float32) ( 1.0 / 2147483647.5 );
10120 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10121 for (j=0; j<info.channels; j++) {
10122 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10123 out[info.outOffset[j]] += 0.5;
10124 out[info.outOffset[j]] *= scale;
10127 out += info.outJump;
10130 else if (info.inFormat == RTAUDIO_FLOAT32) {
10131 // Channel compensation and/or (de)interleaving only.
10132 Float32 *in = (Float32 *)inBuffer;
10133 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10134 for (j=0; j<info.channels; j++) {
10135 out[info.outOffset[j]] = in[info.inOffset[j]];
10138 out += info.outJump;
10141 else if (info.inFormat == RTAUDIO_FLOAT64) {
10142 Float64 *in = (Float64 *)inBuffer;
10143 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10144 for (j=0; j<info.channels; j++) {
10145 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
10148 out += info.outJump;
10152 else if (info.outFormat == RTAUDIO_SINT32) {
10153 Int32 *out = (Int32 *)outBuffer;
10154 if (info.inFormat == RTAUDIO_SINT8) {
10155 signed char *in = (signed char *)inBuffer;
10156 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10157 for (j=0; j<info.channels; j++) {
10158 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10159 out[info.outOffset[j]] <<= 24;
10162 out += info.outJump;
10165 else if (info.inFormat == RTAUDIO_SINT16) {
10166 Int16 *in = (Int16 *)inBuffer;
10167 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10168 for (j=0; j<info.channels; j++) {
10169 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
10170 out[info.outOffset[j]] <<= 16;
10173 out += info.outJump;
10176 else if (info.inFormat == RTAUDIO_SINT24) {
10177 Int24 *in = (Int24 *)inBuffer;
10178 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10179 for (j=0; j<info.channels; j++) {
10180 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
10181 out[info.outOffset[j]] <<= 8;
10184 out += info.outJump;
10187 else if (info.inFormat == RTAUDIO_SINT32) {
10188 // Channel compensation and/or (de)interleaving only.
10189 Int32 *in = (Int32 *)inBuffer;
10190 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10191 for (j=0; j<info.channels; j++) {
10192 out[info.outOffset[j]] = in[info.inOffset[j]];
10195 out += info.outJump;
10198 else if (info.inFormat == RTAUDIO_FLOAT32) {
10199 Float32 *in = (Float32 *)inBuffer;
10200 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10201 for (j=0; j<info.channels; j++) {
10202 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10205 out += info.outJump;
10208 else if (info.inFormat == RTAUDIO_FLOAT64) {
10209 Float64 *in = (Float64 *)inBuffer;
10210 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10211 for (j=0; j<info.channels; j++) {
10212 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10215 out += info.outJump;
10219 else if (info.outFormat == RTAUDIO_SINT24) {
10220 Int24 *out = (Int24 *)outBuffer;
10221 if (info.inFormat == RTAUDIO_SINT8) {
10222 signed char *in = (signed char *)inBuffer;
10223 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10224 for (j=0; j<info.channels; j++) {
10225 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10226 //out[info.outOffset[j]] <<= 16;
10229 out += info.outJump;
10232 else if (info.inFormat == RTAUDIO_SINT16) {
10233 Int16 *in = (Int16 *)inBuffer;
10234 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10235 for (j=0; j<info.channels; j++) {
10236 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10237 //out[info.outOffset[j]] <<= 8;
10240 out += info.outJump;
10243 else if (info.inFormat == RTAUDIO_SINT24) {
10244 // Channel compensation and/or (de)interleaving only.
10245 Int24 *in = (Int24 *)inBuffer;
10246 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10247 for (j=0; j<info.channels; j++) {
10248 out[info.outOffset[j]] = in[info.inOffset[j]];
10251 out += info.outJump;
10254 else if (info.inFormat == RTAUDIO_SINT32) {
10255 Int32 *in = (Int32 *)inBuffer;
10256 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10257 for (j=0; j<info.channels; j++) {
10258 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10259 //out[info.outOffset[j]] >>= 8;
10262 out += info.outJump;
10265 else if (info.inFormat == RTAUDIO_FLOAT32) {
10266 Float32 *in = (Float32 *)inBuffer;
10267 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10268 for (j=0; j<info.channels; j++) {
10269 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10272 out += info.outJump;
10275 else if (info.inFormat == RTAUDIO_FLOAT64) {
10276 Float64 *in = (Float64 *)inBuffer;
10277 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10278 for (j=0; j<info.channels; j++) {
10279 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10282 out += info.outJump;
10286 else if (info.outFormat == RTAUDIO_SINT16) {
10287 Int16 *out = (Int16 *)outBuffer;
10288 if (info.inFormat == RTAUDIO_SINT8) {
10289 signed char *in = (signed char *)inBuffer;
10290 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10291 for (j=0; j<info.channels; j++) {
10292 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10293 out[info.outOffset[j]] <<= 8;
10296 out += info.outJump;
10299 else if (info.inFormat == RTAUDIO_SINT16) {
10300 // Channel compensation and/or (de)interleaving only.
10301 Int16 *in = (Int16 *)inBuffer;
10302 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10303 for (j=0; j<info.channels; j++) {
10304 out[info.outOffset[j]] = in[info.inOffset[j]];
10307 out += info.outJump;
10310 else if (info.inFormat == RTAUDIO_SINT24) {
10311 Int24 *in = (Int24 *)inBuffer;
10312 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10313 for (j=0; j<info.channels; j++) {
10314 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10317 out += info.outJump;
10320 else if (info.inFormat == RTAUDIO_SINT32) {
10321 Int32 *in = (Int32 *)inBuffer;
10322 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10323 for (j=0; j<info.channels; j++) {
10324 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10327 out += info.outJump;
10330 else if (info.inFormat == RTAUDIO_FLOAT32) {
10331 Float32 *in = (Float32 *)inBuffer;
10332 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10333 for (j=0; j<info.channels; j++) {
10334 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10337 out += info.outJump;
10340 else if (info.inFormat == RTAUDIO_FLOAT64) {
10341 Float64 *in = (Float64 *)inBuffer;
10342 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10343 for (j=0; j<info.channels; j++) {
10344 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10347 out += info.outJump;
10351 else if (info.outFormat == RTAUDIO_SINT8) {
10352 signed char *out = (signed char *)outBuffer;
10353 if (info.inFormat == RTAUDIO_SINT8) {
10354 // Channel compensation and/or (de)interleaving only.
10355 signed char *in = (signed char *)inBuffer;
10356 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10357 for (j=0; j<info.channels; j++) {
10358 out[info.outOffset[j]] = in[info.inOffset[j]];
10361 out += info.outJump;
10364 if (info.inFormat == RTAUDIO_SINT16) {
10365 Int16 *in = (Int16 *)inBuffer;
10366 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10367 for (j=0; j<info.channels; j++) {
10368 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10371 out += info.outJump;
10374 else if (info.inFormat == RTAUDIO_SINT24) {
10375 Int24 *in = (Int24 *)inBuffer;
10376 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10377 for (j=0; j<info.channels; j++) {
10378 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10381 out += info.outJump;
10384 else if (info.inFormat == RTAUDIO_SINT32) {
10385 Int32 *in = (Int32 *)inBuffer;
10386 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10387 for (j=0; j<info.channels; j++) {
10388 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10391 out += info.outJump;
10394 else if (info.inFormat == RTAUDIO_FLOAT32) {
10395 Float32 *in = (Float32 *)inBuffer;
10396 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10397 for (j=0; j<info.channels; j++) {
10398 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10401 out += info.outJump;
10404 else if (info.inFormat == RTAUDIO_FLOAT64) {
10405 Float64 *in = (Float64 *)inBuffer;
10406 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10407 for (j=0; j<info.channels; j++) {
10408 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10411 out += info.outJump;
10417 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10418 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10419 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10421 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10427 if ( format == RTAUDIO_SINT16 ) {
10428 for ( unsigned int i=0; i<samples; i++ ) {
10429 // Swap 1st and 2nd bytes.
10434 // Increment 2 bytes.
10438 else if ( format == RTAUDIO_SINT32 ||
10439 format == RTAUDIO_FLOAT32 ) {
10440 for ( unsigned int i=0; i<samples; i++ ) {
10441 // Swap 1st and 4th bytes.
10446 // Swap 2nd and 3rd bytes.
10452 // Increment 3 more bytes.
10456 else if ( format == RTAUDIO_SINT24 ) {
10457 for ( unsigned int i=0; i<samples; i++ ) {
10458 // Swap 1st and 3rd bytes.
10463 // Increment 2 more bytes.
10467 else if ( format == RTAUDIO_FLOAT64 ) {
10468 for ( unsigned int i=0; i<samples; i++ ) {
10469 // Swap 1st and 8th bytes
10474 // Swap 2nd and 7th bytes
10480 // Swap 3rd and 6th bytes
10486 // Swap 4th and 5th bytes
10492 // Increment 5 more bytes.
10498 // Indentation settings for Vim and Emacs
10500 // Local Variables:
10501 // c-basic-offset: 2
10502 // indent-tabs-mode: nil
10505 // vim: et sts=2 sw=2