1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
51 // Static variable definitions.
52 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
53 const unsigned int RtApi::SAMPLE_RATES[] = {
54 4000, 5512, 8000, 9600, 11025, 16000, 22050,
55 32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: Windows APIs use a CRITICAL_SECTION,
// pthread-based APIs use pthread_mutex_t, and the dummy build maps the
// macros to a harmless no-op expression.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

  // Identity overload: narrow strings pass through unchanged.
  static std::string convertCharPointerToStdString(const char *text)
  {
    return std::string(text);
  }

  // Wide-string overload: convert UTF-16 to UTF-8.  The first call
  // measures the required buffer size (includes the terminating NUL,
  // hence length-1 for the std::string); the second performs the
  // conversion directly into the string's storage.
  static std::string convertCharPointerToStdString(const wchar_t *text)
  {
    int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
    std::string s( length-1, '\0' );
    WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
    return s;
  }

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else // __RTAUDIO_DUMMY__
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
96 std::string RtAudio :: getVersion( void )
98 return RTAUDIO_VERSION;
// Build the (short-name -> (enum, display-name)) map plus the ordered
// list of compiled-in APIs.
// TODO: replace with initializer list in C++11.
// The order here will control the order of RtAudio's API search in
// the constructor.
// Have to maintain a separate list of API enum identifiers since map
// doesn't preserve insertion order.
static std::pair< RtAudio::ApiNameMap, std::vector<RtAudio::Api> > init_ApiNames()
{
  RtAudio::ApiNameMap names;
  std::vector<RtAudio::Api> apis;
#if defined(__UNIX_JACK__)
  names["jack"] = std::pair<RtAudio::Api, std::string>(RtAudio::UNIX_JACK, "Jack");
  apis.push_back(RtAudio::UNIX_JACK);
#endif
#if defined(__LINUX_PULSE__)
  names["pulse"] = std::pair<RtAudio::Api, std::string>(RtAudio::LINUX_PULSE, "Pulse");
  apis.push_back(RtAudio::LINUX_PULSE);
#endif
#if defined(__LINUX_ALSA__)
  names["alsa"] = std::pair<RtAudio::Api, std::string>(RtAudio::LINUX_ALSA, "ALSA");
  apis.push_back(RtAudio::LINUX_ALSA);
#endif
#if defined(__LINUX_OSS__)
  names["oss"] = std::pair<RtAudio::Api, std::string>(RtAudio::LINUX_OSS, "OSS");
  apis.push_back(RtAudio::LINUX_OSS);
#endif
#if defined(__WINDOWS_ASIO__)
  names["asio"] = std::pair<RtAudio::Api, std::string>(RtAudio::WINDOWS_ASIO, "ASIO");
  apis.push_back(RtAudio::WINDOWS_ASIO);
#endif
#if defined(__WINDOWS_WASAPI__)
  names["wasapi"] = std::pair<RtAudio::Api, std::string>(RtAudio::WINDOWS_WASAPI, "WASAPI");
  apis.push_back(RtAudio::WINDOWS_WASAPI);
#endif
#if defined(__WINDOWS_DS__)
  names["ds"] = std::pair<RtAudio::Api, std::string>(RtAudio::WINDOWS_DS, "DirectSound");
  apis.push_back(RtAudio::WINDOWS_DS);
#endif
#if defined(__MACOSX_CORE__)
  names["core"] = std::pair<RtAudio::Api, std::string>(RtAudio::MACOSX_CORE, "CoreAudio");
  apis.push_back(RtAudio::MACOSX_CORE);
#endif
#if defined(__RTAUDIO_DUMMY__)
  names["dummy"] = std::pair<RtAudio::Api, std::string>(RtAudio::RTAUDIO_DUMMY, "Dummy");
  apis.push_back(RtAudio::RTAUDIO_DUMMY);
#endif
  return std::make_pair(names, apis);
}
150 const RtAudio::ApiNameMap RtAudio::apiNames(init_ApiNames().first);
151 const std::vector<RtAudio::Api> RtAudio::compiledApis(init_ApiNames().second);
// Copy the list of compiled-in APIs into the caller-supplied vector.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
{
  apis = compiledApis;
}

// Return a reference to the (static) list of compiled-in APIs.
const std::vector<RtAudio::Api>& RtAudio :: getCompiledApi()
{
  return compiledApis;
}

// Sentinels returned by reference when an API lookup fails.
static const std::string unknown_api_name = "";
static const std::string unknown_api_display_name = "Unknown";
166 const std::string& RtAudio :: getCompiledApiName( RtAudio::Api api )
168 ApiNameMap::const_iterator it;
169 for (it = apiNames.begin(); it != apiNames.end(); it++)
170 if (it->second.first == api)
172 return unknown_api_name;
175 const std::string& RtAudio :: getCompiledApiDisplayName( RtAudio::Api api )
177 ApiNameMap::const_iterator it;
178 for (it = apiNames.begin(); it != apiNames.end(); it++)
179 if (it->second.first == api)
180 return it->second.second;
181 return unknown_api_display_name;
184 RtAudio::Api RtAudio :: getCompiledApiByName( const std::string &name )
186 if (apiNames.find(name) == apiNames.end())
187 return RtAudio::UNSPECIFIED;
188 return apiNames.at(name).first;
// Destroy any existing API instance and attempt to instantiate the
// requested one.  On exit rtapi_ is either a valid instance or 0 when
// support for the requested API was not compiled in.
void RtAudio :: openRtApi( RtAudio::Api api )
{
  if ( rtapi_ )
    delete rtapi_;
  rtapi_ = 0;

#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#endif
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#endif
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#endif
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#endif
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#endif
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#endif
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#endif
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#endif
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
#endif
}
// Construct an RtAudio instance.  If a specific API is requested, try
// it first; otherwise (or on failure) probe the compiled APIs in order
// and keep the first one that reports at least one device.
RtAudio :: RtAudio( RtAudio::Api api )
{
  rtapi_ = 0;

  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    openRtApi( api );
    if ( rtapi_ ) return;

    // No compiled support for specified API value.  Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
  }

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;
  }

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler.  But just in
  // case something weird happens, we'll thow an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
}
// Destructor: release the underlying API instance, if any.
RtAudio :: ~RtAudio()
{
  if ( rtapi_ )
    delete rtapi_;
}
// Thin forwarding wrapper: all argument validation and device probing
// is performed by the selected RtApi subclass.
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
                            RtAudio::StreamParameters *inputParameters,
                            RtAudioFormat format, unsigned int sampleRate,
                            unsigned int *bufferFrames,
                            RtAudioCallback callback, void *userData,
                            RtAudio::StreamOptions *options,
                            RtAudioErrorCallback errorCallback )
{
  return rtapi_->openStream( outputParameters, inputParameters, format,
                             sampleRate, bufferFrames, callback,
                             userData, options, errorCallback );
}
287 // *************************************************** //
289 // Public RtApi definitions (see end of file for
290 // private or protected utility functions).
292 // *************************************************** //
// Base-class constructor: put the stream bookkeeping into a known
// "closed" state and create the stream mutex.
RtApi :: RtApi()
{
  stream_.state = STREAM_CLOSED;
  stream_.mode = UNINITIALIZED;
  stream_.apiHandle = 0;
  stream_.userBuffer[0] = 0;
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;
  firstErrorOccurred_ = false;
}

// Base-class destructor: tear down the stream mutex created above.
RtApi :: ~RtApi()
{
  MUTEX_DESTROY( &stream_.mutex );
}
// Validate the user-supplied stream parameters, then ask the API
// subclass (via probeDeviceOpen) to open the requested output and/or
// input channels.  On any validation failure an error is reported and
// the stream is left closed.
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
{
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  // Clear stream information potentially left from a previously open stream.
  clearStreamInfo();

  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );
    return;
  }

  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );
    return;
  }

  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
  if ( oParams ) {
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  unsigned int iChannels = 0;
  if ( iParams ) {
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );
      return;
    }
  }

  bool result;

  // Open the output side first ...
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  // ... then the input side; on failure, unwind the output open.
  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );
      return;
    }
  }

  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
}
// Default-device queries: base-class fallbacks that simply report
// device 0.  Subclasses override these where the underlying API can
// identify a true system default.
unsigned int RtApi :: getDefaultInputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}

unsigned int RtApi :: getDefaultOutputDevice( void )
{
  // Should be implemented in subclasses if possible.
  return 0;
}

void RtApi :: closeStream( void )
{
  // MUST be implemented in subclasses!
  return;
}

bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
{
  // MUST be implemented in subclasses!
  return FAILURE;
}
// Advance the running stream-time estimate by one buffer's duration.
void RtApi :: tickStreamTime( void )
{
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Record when this tick happened so getStreamTime() can interpolate.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
// Return the total stream latency in frames: output latency plus
// input latency for duplex streams, otherwise just the active side.
long RtApi :: getStreamLatency( void )
{
  verifyStream();

  long totalLatency = 0;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
    totalLatency = stream_.latency[0];
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
    totalLatency += stream_.latency[1];

  return totalLatency;
}
// Return the number of elapsed seconds since the stream was started.
double RtApi :: getStreamTime( void )
{
  verifyStream();

#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.
  struct timeval then;
  struct timeval now;

  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));
#else
  // Without gettimeofday(), fall back to buffer-granularity time.
  return stream_.streamTime;
#endif
}
// Set the stream time to a user-supplied value (negative values are
// ignored) and reset the interpolation timestamp.
void RtApi :: setStreamTime( double time )
{
  verifyStream();

  if ( time >= 0.0 )
    stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
#endif
}
// Return the sample rate the open stream is actually running at.
unsigned int RtApi :: getStreamSampleRate( void )
{
  verifyStream();

  return stream_.sampleRate;
}
// *************************************************** //
//
// OS/API-specific methods.
//
// *************************************************** //

#if defined(__MACOSX_CORE__)

// The OS X CoreAudio API is designed to use a separate callback
// procedure for each of its audio devices.  A single RtAudio duplex
// stream using two different devices is supported here, though it
// cannot be guaranteed to always behave correctly because we cannot
// synchronize these two callbacks.
//
// A property listener is installed for over/underrun information.
// However, no functionality is currently provided to allow property
// listeners to trigger user handlers because it is unclear what could
// be done if a critical stream parameter (buffer size, sample rate,
// device disconnect) notification arrived.  The listeners entail
// quite a bit of extra code and most likely, a user program wouldn't
// be prepared for the result anyway.  However, we do provide a flag
// to the client callback function to inform of an over/underrun.

// A structure to hold various information related to the CoreAudio API
// operation.
struct CoreHandle {
  AudioDeviceID id[2];    // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
#endif
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  bool xrun[2];           // over/underrun flags: [0] output, [1] input
  char *deviceBuffer;     // intermediate buffer for (de)interleaving
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.

  CoreHandle()
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
// CoreAudio backend constructor: hook the HAL into this process's run
// loop so device property notifications are delivered.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6.  If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
  }
#endif
}
RtApiCore :: ~RtApiCore()
{
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
}
// Query the HAL for the number of audio devices (0 on error).
unsigned int RtApiCore :: getDeviceCount( void )
{
  // Find out how many audio devices there are, if any.
  UInt32 dataSize;
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );
    return 0;
  }

  // The property size is the full device-ID array in bytes.
  return dataSize / sizeof( AudioDeviceID );
}
// Map the system default input AudioDeviceID to RtAudio's 0-based
// device index (0 on any failure or when only one device exists).
unsigned int RtApiCore :: getDefaultInputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;

  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  dataSize *= nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // The default device's index is its position in the full device list.
  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
// Map the system default output AudioDeviceID to RtAudio's 0-based
// device index (0 on any failure or when only one device exists).
unsigned int RtApiCore :: getDefaultOutputDevice( void )
{
  unsigned int nDevices = getDeviceCount();
  if ( nDevices <= 1 ) return 0;

  AudioDeviceID id;
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );
    return 0;
  }

  dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return 0;
  }

  // The default device's index is its position in the full device list.
  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
  return 0;
}
// Probe a device's name, channel counts and supported sample rates.
// On any failure a partially-filled DeviceInfo with probed == false is
// returned after issuing a WARNING.
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
{
  RtAudio::DeviceInfo info;
  info.probed = false;

  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );
    return info;
  }

  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );
    return info;
  }

  AudioDeviceID id = deviceList[ device ];

  // Get the device name: "<manufacturer>: <device name>".
  info.name.erase();
  CFStringRef cfname;
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // Worst-case UTF-8 expansion of a CFString is 3 bytes per character.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );
  CFRelease( cfname );
  free(mname);

  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
#else
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
#endif
  info.name.append( (const char *)name, strlen(name) );
  CFRelease( cfname );
  free(name);

  // Get the output stream "configuration".
  AudioBufferList	*bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  dataSize = 0;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get output channel information: sum channels over all streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // Get the input stream "configuration".
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );
    return info;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
  free( bufferList );

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // The sample rate reporting mechanism is a bit of a mystery.  It
  // seems that it can either return individual rates or a range of
  // rates.  I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Prefer the highest rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

    } else {
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
    }
  }

  if ( haveValueRange ) {
    // Fill in the standard rates that fall inside the reported range.
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];
      }
    }
  }

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );
    return info;
  }

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;

  info.probed = true;
  return info;
}
// HAL IOProc trampoline: recover the RtApiCore instance from the
// user-data pointer and dispatch to its callbackEvent() method.
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
                                 void* infoPointer )
{
  CallbackInfo *info = (CallbackInfo *) infoPointer;

  RtApiCore *object = (RtApiCore *) info->object;
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
    return kAudioHardwareUnspecifiedError;
  else
    return kAudioHardwareNoError;
}
// Property listener: latch over/underrun notifications into the
// CoreHandle flags (xrun[1] = input side, xrun[0] = output side).
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              UInt32 nAddresses,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
{
  CoreHandle *handle = (CoreHandle *) handlePointer;
  for ( UInt32 i=0; i<nAddresses; i++ ) {
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
        handle->xrun[1] = true;
      else
        handle->xrun[0] = true;
    }
  }

  return kAudioHardwareNoError;
}
// Property listener: read the device's current nominal sample rate
// into the Float64 the caller is polling.
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
                              void* ratePointer )
{
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
}
// Open the requested channels on a CoreAudio device for one direction
// of a stream.  Returns FAILURE (with errorText_ set) on any error.
// NOTE: this definition continues beyond the end of this chunk.
bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
{
  // Get device ID
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
    return FAILURE;
  }

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
    return FAILURE;
  }

  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
    return FAILURE;
  }

  AudioDeviceID id = deviceList[ device ];

  // Setup for stream mode.
  bool isInput = false;
  if ( mode == INPUT ) {
    isInput = true;
    property.mScope = kAudioDevicePropertyScopeInput;
  }
  else
    property.mScope = kAudioDevicePropertyScopeOutput;

  // Get the stream "configuration".
  AudioBufferList	*bufferList = nil;
  dataSize = 0;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
    return FAILURE;
  }

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    free( bufferList );
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    return FAILURE;
  }

  // Search for one or more streams that contain the desired number of
  // channels. CoreAudio devices can have an arbitrary number of
  // streams and each stream can have an arbitrary number of channels.
  // For each stream, a single buffer of interleaved samples is
  // provided. RtAudio prefers the use of one stream of interleaved
  // data or multiple consecutive single-channel streams.  However, we
  // now support multiple consecutive multi-channel streams of
  // interleaved data as well.
  UInt32 iStream, offsetCounter = firstChannel;
  UInt32 nStreams = bufferList->mNumberBuffers;
  bool monoMode = false;
  bool foundStream = false;

  // First check that the device supports the requested number of
1022 UInt32 deviceChannels = 0;
1023 for ( iStream=0; iStream<nStreams; iStream++ )
1024 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
1026 if ( deviceChannels < ( channels + firstChannel ) ) {
1028 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
1029 errorText_ = errorStream_.str();
1033 // Look for a single stream meeting our needs.
1034 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
1035 for ( iStream=0; iStream<nStreams; iStream++ ) {
1036 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1037 if ( streamChannels >= channels + offsetCounter ) {
1038 firstStream = iStream;
1039 channelOffset = offsetCounter;
1043 if ( streamChannels > offsetCounter ) break;
1044 offsetCounter -= streamChannels;
1047 // If we didn't find a single stream above, then we should be able
1048 // to meet the channel specification with multiple streams.
1049 if ( foundStream == false ) {
1051 offsetCounter = firstChannel;
1052 for ( iStream=0; iStream<nStreams; iStream++ ) {
1053 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
1054 if ( streamChannels > offsetCounter ) break;
1055 offsetCounter -= streamChannels;
1058 firstStream = iStream;
1059 channelOffset = offsetCounter;
1060 Int32 channelCounter = channels + offsetCounter - streamChannels;
1062 if ( streamChannels > 1 ) monoMode = false;
1063 while ( channelCounter > 0 ) {
1064 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
1065 if ( streamChannels > 1 ) monoMode = false;
1066 channelCounter -= streamChannels;
1073 // Determine the buffer size.
1074 AudioValueRange bufferRange;
1075 dataSize = sizeof( AudioValueRange );
1076 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
1077 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
1079 if ( result != noErr ) {
1080 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
1081 errorText_ = errorStream_.str();
1085 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1086 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
1087 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1089 // Set the buffer size. For multiple streams, I'm assuming we only
1090 // need to make this setting for the master channel.
1091 UInt32 theSize = (UInt32) *bufferSize;
1092 dataSize = sizeof( UInt32 );
1093 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1094 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1096 if ( result != noErr ) {
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1098 errorText_ = errorStream_.str();
1102 // If attempting to setup a duplex stream, the bufferSize parameter
1103 // MUST be the same in both directions!
1104 *bufferSize = theSize;
1105 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1107 errorText_ = errorStream_.str();
1111 stream_.bufferSize = *bufferSize;
1112 stream_.nBuffers = 1;
1114 // Try to set "hog" mode ... it's not clear to me this is working.
1115 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1117 dataSize = sizeof( hog_pid );
1118 property.mSelector = kAudioDevicePropertyHogMode;
1119 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1120 if ( result != noErr ) {
1121 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1122 errorText_ = errorStream_.str();
1126 if ( hog_pid != getpid() ) {
1128 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1129 if ( result != noErr ) {
1130 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1131 errorText_ = errorStream_.str();
1137 // Check and if necessary, change the sample rate for the device.
1138 Float64 nominalRate;
1139 dataSize = sizeof( Float64 );
1140 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1141 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1142 if ( result != noErr ) {
1143 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1144 errorText_ = errorStream_.str();
1148 // Only change the sample rate if off by more than 1 Hz.
1149 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1151 // Set a property listener for the sample rate change
1152 Float64 reportedRate = 0.0;
1153 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1154 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1155 if ( result != noErr ) {
1156 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1157 errorText_ = errorStream_.str();
1161 nominalRate = (Float64) sampleRate;
1162 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1163 if ( result != noErr ) {
1164 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1165 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1166 errorText_ = errorStream_.str();
1170 // Now wait until the reported nominal rate is what we just set.
1171 UInt32 microCounter = 0;
1172 while ( reportedRate != nominalRate ) {
1173 microCounter += 5000;
1174 if ( microCounter > 5000000 ) break;
1178 // Remove the property listener.
1179 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1181 if ( microCounter > 5000000 ) {
1182 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1183 errorText_ = errorStream_.str();
1188 // Now set the stream format for all streams. Also, check the
1189 // physical format of the device and change that if necessary.
1190 AudioStreamBasicDescription description;
1191 dataSize = sizeof( AudioStreamBasicDescription );
1192 property.mSelector = kAudioStreamPropertyVirtualFormat;
1193 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1194 if ( result != noErr ) {
1195 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1196 errorText_ = errorStream_.str();
1200 // Set the sample rate and data format id. However, only make the
1201 // change if the sample rate is not within 1.0 of the desired
1202 // rate and the format is not linear pcm.
1203 bool updateFormat = false;
1204 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1205 description.mSampleRate = (Float64) sampleRate;
1206 updateFormat = true;
1209 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1210 description.mFormatID = kAudioFormatLinearPCM;
1211 updateFormat = true;
1214 if ( updateFormat ) {
1215 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1216 if ( result != noErr ) {
1217 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1218 errorText_ = errorStream_.str();
1223 // Now check the physical format.
1224 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1225 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1226 if ( result != noErr ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1232 //std::cout << "Current physical stream format:" << std::endl;
1233 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1234 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1235 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1236 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1238 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1239 description.mFormatID = kAudioFormatLinearPCM;
1240 //description.mSampleRate = (Float64) sampleRate;
1241 AudioStreamBasicDescription testDescription = description;
1244 // We'll try higher bit rates first and then work our way down.
1245 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1246 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1247 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1248 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1249 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1250 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1251 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1252 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1253 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1254 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1255 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1256 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1257 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1259 bool setPhysicalFormat = false;
1260 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1261 testDescription = description;
1262 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1263 testDescription.mFormatFlags = physicalFormats[i].second;
1264 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1265 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1267 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1268 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1269 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1270 if ( result == noErr ) {
1271 setPhysicalFormat = true;
1272 //std::cout << "Updated physical stream format:" << std::endl;
1273 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1274 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1275 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1276 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1281 if ( !setPhysicalFormat ) {
1282 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1283 errorText_ = errorStream_.str();
1286 } // done setting virtual/physical formats.
1288 // Get the stream / device latency.
1290 dataSize = sizeof( UInt32 );
1291 property.mSelector = kAudioDevicePropertyLatency;
1292 if ( AudioObjectHasProperty( id, &property ) == true ) {
1293 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1294 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1296 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1297 errorText_ = errorStream_.str();
1298 error( RtAudioError::WARNING );
1302 // Byte-swapping: According to AudioHardware.h, the stream data will
1303 // always be presented in native-endian format, so we should never
1304 // need to byte swap.
1305 stream_.doByteSwap[mode] = false;
1307 // From the CoreAudio documentation, PCM data must be supplied as
1309 stream_.userFormat = format;
1310 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1312 if ( streamCount == 1 )
1313 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1314 else // multiple streams
1315 stream_.nDeviceChannels[mode] = channels;
1316 stream_.nUserChannels[mode] = channels;
1317 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1318 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1319 else stream_.userInterleaved = true;
1320 stream_.deviceInterleaved[mode] = true;
1321 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1323 // Set flags for buffer conversion.
1324 stream_.doConvertBuffer[mode] = false;
1325 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1326 stream_.doConvertBuffer[mode] = true;
1327 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1328 stream_.doConvertBuffer[mode] = true;
1329 if ( streamCount == 1 ) {
1330 if ( stream_.nUserChannels[mode] > 1 &&
1331 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1332 stream_.doConvertBuffer[mode] = true;
1334 else if ( monoMode && stream_.userInterleaved )
1335 stream_.doConvertBuffer[mode] = true;
1337 // Allocate our CoreHandle structure for the stream.
1338 CoreHandle *handle = 0;
1339 if ( stream_.apiHandle == 0 ) {
1341 handle = new CoreHandle;
1343 catch ( std::bad_alloc& ) {
1344 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1348 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1349 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1352 stream_.apiHandle = (void *) handle;
1355 handle = (CoreHandle *) stream_.apiHandle;
1356 handle->iStream[mode] = firstStream;
1357 handle->nStreams[mode] = streamCount;
1358 handle->id[mode] = id;
1360 // Allocate necessary internal buffers.
1361 unsigned long bufferBytes;
1362 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1363 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1364 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1365 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1366 if ( stream_.userBuffer[mode] == NULL ) {
1367 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1371 // If possible, we will make use of the CoreAudio stream buffers as
1372 // "device buffers". However, we can't do this if using multiple
1374 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1376 bool makeBuffer = true;
1377 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1378 if ( mode == INPUT ) {
1379 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1380 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1381 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1386 bufferBytes *= *bufferSize;
1387 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1388 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1389 if ( stream_.deviceBuffer == NULL ) {
1390 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1396 stream_.sampleRate = sampleRate;
1397 stream_.device[mode] = device;
1398 stream_.state = STREAM_STOPPED;
1399 stream_.callbackInfo.object = (void *) this;
1401 // Setup the buffer conversion information structure.
1402 if ( stream_.doConvertBuffer[mode] ) {
1403 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1404 else setConvertInfo( mode, channelOffset );
1407 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1408 // Only one callback procedure per device.
1409 stream_.mode = DUPLEX;
1411 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1412 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1414 // deprecated in favor of AudioDeviceCreateIOProcID()
1415 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1417 if ( result != noErr ) {
1418 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1419 errorText_ = errorStream_.str();
1422 if ( stream_.mode == OUTPUT && mode == INPUT )
1423 stream_.mode = DUPLEX;
1425 stream_.mode = mode;
1428 // Setup the device property listener for over/underload.
1429 property.mSelector = kAudioDeviceProcessorOverload;
1430 property.mScope = kAudioObjectPropertyScopeGlobal;
1431 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1437 pthread_cond_destroy( &handle->condition );
1439 stream_.apiHandle = 0;
1442 for ( int i=0; i<2; i++ ) {
1443 if ( stream_.userBuffer[i] ) {
1444 free( stream_.userBuffer[i] );
1445 stream_.userBuffer[i] = 0;
1449 if ( stream_.deviceBuffer ) {
1450 free( stream_.deviceBuffer );
1451 stream_.deviceBuffer = 0;
1454 stream_.state = STREAM_CLOSED;
1458 void RtApiCore :: closeStream( void )
1460 if ( stream_.state == STREAM_CLOSED ) {
1461 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1462 error( RtAudioError::WARNING );
1466 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1467 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1469 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1470 kAudioObjectPropertyScopeGlobal,
1471 kAudioObjectPropertyElementMaster };
1473 property.mSelector = kAudioDeviceProcessorOverload;
1474 property.mScope = kAudioObjectPropertyScopeGlobal;
1475 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1476 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1477 error( RtAudioError::WARNING );
1480 if ( stream_.state == STREAM_RUNNING )
1481 AudioDeviceStop( handle->id[0], callbackHandler );
1482 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1483 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1485 // deprecated in favor of AudioDeviceDestroyIOProcID()
1486 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
1490 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1492 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1493 kAudioObjectPropertyScopeGlobal,
1494 kAudioObjectPropertyElementMaster };
1496 property.mSelector = kAudioDeviceProcessorOverload;
1497 property.mScope = kAudioObjectPropertyScopeGlobal;
1498 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1499 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1500 error( RtAudioError::WARNING );
1503 if ( stream_.state == STREAM_RUNNING )
1504 AudioDeviceStop( handle->id[1], callbackHandler );
1505 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1506 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1508 // deprecated in favor of AudioDeviceDestroyIOProcID()
1509 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
1513 for ( int i=0; i<2; i++ ) {
1514 if ( stream_.userBuffer[i] ) {
1515 free( stream_.userBuffer[i] );
1516 stream_.userBuffer[i] = 0;
1520 if ( stream_.deviceBuffer ) {
1521 free( stream_.deviceBuffer );
1522 stream_.deviceBuffer = 0;
1525 // Destroy pthread condition variable.
1526 pthread_cond_destroy( &handle->condition );
1528 stream_.apiHandle = 0;
1530 stream_.mode = UNINITIALIZED;
1531 stream_.state = STREAM_CLOSED;
1534 void RtApiCore :: startStream( void )
1537 if ( stream_.state == STREAM_RUNNING ) {
1538 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1539 error( RtAudioError::WARNING );
1543 OSStatus result = noErr;
1544 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1545 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1547 result = AudioDeviceStart( handle->id[0], callbackHandler );
1548 if ( result != noErr ) {
1549 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1550 errorText_ = errorStream_.str();
1555 if ( stream_.mode == INPUT ||
1556 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1558 result = AudioDeviceStart( handle->id[1], callbackHandler );
1559 if ( result != noErr ) {
1560 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1561 errorText_ = errorStream_.str();
1566 handle->drainCounter = 0;
1567 handle->internalDrain = false;
1568 stream_.state = STREAM_RUNNING;
1571 if ( result == noErr ) return;
1572 error( RtAudioError::SYSTEM_ERROR );
1575 void RtApiCore :: stopStream( void )
1578 if ( stream_.state == STREAM_STOPPED ) {
1579 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1580 error( RtAudioError::WARNING );
1584 OSStatus result = noErr;
1585 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1586 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1588 if ( handle->drainCounter == 0 ) {
1589 handle->drainCounter = 2;
1590 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1593 result = AudioDeviceStop( handle->id[0], callbackHandler );
1594 if ( result != noErr ) {
1595 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1596 errorText_ = errorStream_.str();
1601 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1603 result = AudioDeviceStop( handle->id[1], callbackHandler );
1604 if ( result != noErr ) {
1605 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1606 errorText_ = errorStream_.str();
1611 stream_.state = STREAM_STOPPED;
1614 if ( result == noErr ) return;
1615 error( RtAudioError::SYSTEM_ERROR );
1618 void RtApiCore :: abortStream( void )
1621 if ( stream_.state == STREAM_STOPPED ) {
1622 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1623 error( RtAudioError::WARNING );
1627 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1628 handle->drainCounter = 2;
1633 // This function will be called by a spawned thread when the user
1634 // callback function signals that the stream should be stopped or
1635 // aborted. It is better to handle it this way because the
1636 // callbackEvent() function probably should return before the AudioDeviceStop()
1637 // function is called.
1638 static void *coreStopStream( void *ptr )
1640 CallbackInfo *info = (CallbackInfo *) ptr;
1641 RtApiCore *object = (RtApiCore *) info->object;
1643 object->stopStream();
1644 pthread_exit( NULL );
1647 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1648 const AudioBufferList *inBufferList,
1649 const AudioBufferList *outBufferList )
1651 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1652 if ( stream_.state == STREAM_CLOSED ) {
1653 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1654 error( RtAudioError::WARNING );
1658 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1659 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1661 // Check if we were draining the stream and signal is finished.
1662 if ( handle->drainCounter > 3 ) {
1663 ThreadHandle threadId;
1665 stream_.state = STREAM_STOPPING;
1666 if ( handle->internalDrain == true )
1667 pthread_create( &threadId, NULL, coreStopStream, info );
1668 else // external call to stopStream()
1669 pthread_cond_signal( &handle->condition );
1673 AudioDeviceID outputDevice = handle->id[0];
1675 // Invoke user callback to get fresh output data UNLESS we are
1676 // draining stream or duplex mode AND the input/output devices are
1677 // different AND this function is called for the input device.
1678 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1679 RtAudioCallback callback = (RtAudioCallback) info->callback;
1680 double streamTime = getStreamTime();
1681 RtAudioStreamStatus status = 0;
1682 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1683 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1684 handle->xrun[0] = false;
1686 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1687 status |= RTAUDIO_INPUT_OVERFLOW;
1688 handle->xrun[1] = false;
1691 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1692 stream_.bufferSize, streamTime, status, info->userData );
1693 if ( cbReturnValue == 2 ) {
1694 stream_.state = STREAM_STOPPING;
1695 handle->drainCounter = 2;
1699 else if ( cbReturnValue == 1 ) {
1700 handle->drainCounter = 1;
1701 handle->internalDrain = true;
1705 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1707 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1709 if ( handle->nStreams[0] == 1 ) {
1710 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1712 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1714 else { // fill multiple streams with zeros
1715 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1716 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1718 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
1722 else if ( handle->nStreams[0] == 1 ) {
1723 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1724 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1725 stream_.userBuffer[0], stream_.convertInfo[0] );
1727 else { // copy from user buffer
1728 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1729 stream_.userBuffer[0],
1730 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1733 else { // fill multiple streams
1734 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1735 if ( stream_.doConvertBuffer[0] ) {
1736 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1737 inBuffer = (Float32 *) stream_.deviceBuffer;
1740 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1741 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1742 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1743 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1744 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1747 else { // fill multiple multi-channel streams with interleaved data
1748 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1751 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1752 UInt32 inChannels = stream_.nUserChannels[0];
1753 if ( stream_.doConvertBuffer[0] ) {
1754 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1755 inChannels = stream_.nDeviceChannels[0];
1758 if ( inInterleaved ) inOffset = 1;
1759 else inOffset = stream_.bufferSize;
1761 channelsLeft = inChannels;
1762 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1764 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1765 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1768 // Account for possible channel offset in first stream
1769 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1770 streamChannels -= stream_.channelOffset[0];
1771 outJump = stream_.channelOffset[0];
1775 // Account for possible unfilled channels at end of the last stream
1776 if ( streamChannels > channelsLeft ) {
1777 outJump = streamChannels - channelsLeft;
1778 streamChannels = channelsLeft;
1781 // Determine input buffer offsets and skips
1782 if ( inInterleaved ) {
1783 inJump = inChannels;
1784 in += inChannels - channelsLeft;
1788 in += (inChannels - channelsLeft) * inOffset;
1791 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1792 for ( unsigned int j=0; j<streamChannels; j++ ) {
1793 *out++ = in[j*inOffset];
1798 channelsLeft -= streamChannels;
1804 // Don't bother draining input
1805 if ( handle->drainCounter ) {
1806 handle->drainCounter++;
1810 AudioDeviceID inputDevice;
1811 inputDevice = handle->id[1];
1812 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1814 if ( handle->nStreams[1] == 1 ) {
1815 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1816 convertBuffer( stream_.userBuffer[1],
1817 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1818 stream_.convertInfo[1] );
1820 else { // copy to user buffer
1821 memcpy( stream_.userBuffer[1],
1822 inBufferList->mBuffers[handle->iStream[1]].mData,
1823 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1826 else { // read from multiple streams
1827 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1828 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
1830 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1831 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1832 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1833 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1834 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1837 else { // read from multiple multi-channel streams
1838 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1841 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1842 UInt32 outChannels = stream_.nUserChannels[1];
1843 if ( stream_.doConvertBuffer[1] ) {
1844 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1845 outChannels = stream_.nDeviceChannels[1];
1848 if ( outInterleaved ) outOffset = 1;
1849 else outOffset = stream_.bufferSize;
1851 channelsLeft = outChannels;
1852 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1854 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1855 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1858 // Account for possible channel offset in first stream
1859 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1860 streamChannels -= stream_.channelOffset[1];
1861 inJump = stream_.channelOffset[1];
1865 // Account for possible unread channels at end of the last stream
1866 if ( streamChannels > channelsLeft ) {
1867 inJump = streamChannels - channelsLeft;
1868 streamChannels = channelsLeft;
1871 // Determine output buffer offsets and skips
1872 if ( outInterleaved ) {
1873 outJump = outChannels;
1874 out += outChannels - channelsLeft;
1878 out += (outChannels - channelsLeft) * outOffset;
1881 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1882 for ( unsigned int j=0; j<streamChannels; j++ ) {
1883 out[j*outOffset] = *in++;
1888 channelsLeft -= streamChannels;
1892 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1893 convertBuffer( stream_.userBuffer[1],
1894 stream_.deviceBuffer,
1895 stream_.convertInfo[1] );
1901 //MUTEX_UNLOCK( &stream_.mutex );
1903 RtApi::tickStreamTime();
// Map a CoreAudio OSStatus error code to the name of its symbolic
// constant, for inclusion in RtAudio error messages. Unrecognized
// codes fall through to a generic "unknown error" string.
1907 const char* RtApiCore :: getErrorCode( OSStatus code )
1911 case kAudioHardwareNotRunningError:
1912 return "kAudioHardwareNotRunningError";
1914 case kAudioHardwareUnspecifiedError:
1915 return "kAudioHardwareUnspecifiedError";
1917 case kAudioHardwareUnknownPropertyError:
1918 return "kAudioHardwareUnknownPropertyError";
1920 case kAudioHardwareBadPropertySizeError:
1921 return "kAudioHardwareBadPropertySizeError";
1923 case kAudioHardwareIllegalOperationError:
1924 return "kAudioHardwareIllegalOperationError";
1926 case kAudioHardwareBadObjectError:
1927 return "kAudioHardwareBadObjectError";
1929 case kAudioHardwareBadDeviceError:
1930 return "kAudioHardwareBadDeviceError";
1932 case kAudioHardwareBadStreamError:
1933 return "kAudioHardwareBadStreamError";
1935 case kAudioHardwareUnsupportedOperationError:
1936 return "kAudioHardwareUnsupportedOperationError";
1938 case kAudioDeviceUnsupportedFormatError:
1939 return "kAudioDeviceUnsupportedFormatError";
1941 case kAudioDevicePermissionsError:
1942 return "kAudioDevicePermissionsError";
// Default: code is not one of the known CoreAudio error constants.
1945 return "CoreAudio unknown error";
1949 //******************** End of __MACOSX_CORE__ *********************//
1952 #if defined(__UNIX_JACK__)
1954 // JACK is a low-latency audio server, originally written for the
1955 // GNU/Linux operating system and now also ported to OS-X. It can
1956 // connect a number of different applications to an audio device, as
1957 // well as allowing them to share audio between themselves.
1959 // When using JACK with RtAudio, "devices" refer to JACK clients that
1960 // have ports connected to the server. The JACK server is typically
1961 // started in a terminal as follows:
1963 // .jackd -d alsa -d hw:0
1965 // or through an interface program such as qjackctl. Many of the
1966 // parameters normally set for a stream are fixed by the JACK server
1967 // and can be specified when the JACK server is started. In
1970 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1972 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1973 // frames, and number of buffers = 4. Once the server is running, it
1974 // is not possible to override these values. If the values are not
1975 // specified in the command-line, the JACK server uses default values.
1977 // The JACK server does not have to be running when an instance of
1978 // RtApiJack is created, though the function getDeviceCount() will
1979 // report 0 devices found until JACK has been started. When no
1980 // devices are available (i.e., the JACK server is not running), a
1981 // stream cannot be opened.
1983 #include <jack/jack.h>
1987 // A structure to hold various information related to the Jack API
// implementation. An instance is stored in stream_.apiHandle.
// Throughout, index [0] refers to playback (output) and [1] to
// capture (input).
1990 jack_client_t *client;
// Per-direction arrays of registered JACK port handles (one entry
// per user channel); allocated in probeDeviceOpen().
1991 jack_port_t **ports[2];
// Per-direction JACK client (device) names used for auto-connection.
1992 std::string deviceName[2];
// Signaled by callbackEvent() when an output drain completes, so
// stopStream() can unblock.
1994 pthread_cond_t condition;
1995 int drainCounter; // Tracks callback counts when draining
1996 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Default-construct with no client, no ports, and xrun flags cleared.
1999 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
2002 #if !defined(__RTAUDIO_DEBUG__)
// No-op error handler installed via jack_set_error_function() to
// suppress JACK's internal stderr messages in non-debug builds.
2003 static void jackSilentError( const char * ) {};
// Constructor: auto-connection of ports defaults to enabled; it can
// be disabled per-stream via the RTAUDIO_JACK_DONT_CONNECT flag.
2006 RtApiJack :: RtApiJack()
2007 :shouldAutoconnect_(true) {
2008 // Nothing to do here.
2009 #if !defined(__RTAUDIO_DEBUG__)
2010 // Turn off Jack's internal error reporting.
2011 jack_set_error_function( &jackSilentError );
// Destructor: ensure any open stream is closed (releases the JACK
// client and all buffers) before the object goes away.
2015 RtApiJack :: ~RtApiJack()
2017 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count available JACK "devices". A device here is a distinct JACK
// client name, derived by grouping port names by their prefix up to
// the first colon. Returns 0 if no JACK server is running.
2020 unsigned int RtApiJack :: getDeviceCount( void )
2022 // See if we can become a jack client.
2023 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2024 jack_status_t *status = NULL;
// Use a throwaway client name; JackNoStartServer avoids spawning a server.
2025 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
2026 if ( client == 0 ) return 0;
2029 std::string port, previousPort;
2030 unsigned int nChannels = 0, nDevices = 0;
// NULL patterns / 0 flags: enumerate every port on the server.
2031 ports = jack_get_ports( client, NULL, NULL, 0 );
2033 // Parse the port names up to the first colon (:).
2036 port = (char *) ports[ nChannels ];
2037 iColon = port.find(":");
2038 if ( iColon != std::string::npos ) {
2039 port = port.substr( 0, iColon + 1 );
// Ports are assumed grouped by client; a new prefix means a new device.
2040 if ( port != previousPort ) {
2042 previousPort = port;
2045 } while ( ports[++nChannels] );
// Close the probing client before returning.
2049 jack_client_close( client );
// Probe device information (name, channel counts, sample rate,
// native format) for the JACK "device" with the given index.
// Emits a WARNING (or INVALID_USE for a bad index) and returns a
// partially-filled info struct on failure.
2053 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2055 RtAudio::DeviceInfo info;
2056 info.probed = false;
2058 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2059 jack_status_t *status = NULL;
2060 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2061 if ( client == 0 ) {
2062 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2063 error( RtAudioError::WARNING );
// Enumerate all ports and group by client-name prefix to locate the
// device name, mirroring the logic in getDeviceCount().
2068 std::string port, previousPort;
2069 unsigned int nPorts = 0, nDevices = 0;
2070 ports = jack_get_ports( client, NULL, NULL, 0 );
2072 // Parse the port names up to the first colon (:).
2075 port = (char *) ports[ nPorts ];
2076 iColon = port.find(":");
2077 if ( iColon != std::string::npos ) {
2078 port = port.substr( 0, iColon );
2079 if ( port != previousPort ) {
2080 if ( nDevices == device ) info.name = port;
2082 previousPort = port;
2085 } while ( ports[++nPorts] );
2089 if ( device >= nDevices ) {
2090 jack_client_close( client );
2091 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2092 error( RtAudioError::INVALID_USE );
2096 // Get the current jack server sample rate.
2097 info.sampleRates.clear();
// JACK runs at exactly one rate, fixed at server start.
2099 info.preferredSampleRate = jack_get_sample_rate( client );
2100 info.sampleRates.push_back( info.preferredSampleRate );
2102 // Count the available ports containing the client name as device
2103 // channels. Jack "input ports" equal RtAudio output channels.
2104 unsigned int nChannels = 0;
2105 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2107 while ( ports[ nChannels ] ) nChannels++;
2109 info.outputChannels = nChannels;
2112 // Jack "output ports" equal RtAudio input channels.
2114 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2116 while ( ports[ nChannels ] ) nChannels++;
2118 info.inputChannels = nChannels;
2121 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2122 jack_client_close(client);
2123 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2124 error( RtAudioError::WARNING );
2128 // If device opens for both playback and capture, we determine the channels.
2129 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2130 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2132 // Jack always uses 32-bit floats.
2133 info.nativeFormats = RTAUDIO_FLOAT32;
2135 // Jack doesn't provide default devices so we'll use the first available one.
2136 if ( device == 0 && info.outputChannels > 0 )
2137 info.isDefaultOutput = true;
2138 if ( device == 0 && info.inputChannels > 0 )
2139 info.isDefaultInput = true;
2141 jack_client_close(client);
// JACK process callback: forwards each block of nframes to
// RtApiJack::callbackEvent(). Returns nonzero (1) on failure, which
// tells JACK to remove this client from the process graph.
2146 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2148 CallbackInfo *info = (CallbackInfo *) infoPointer;
2150 RtApiJack *object = (RtApiJack *) info->object;
2151 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2156 // This function will be called by a spawned thread when the Jack
2157 // server signals that it is shutting down. It is necessary to handle
2158 // it this way because the jackShutdown() function must return before
2159 // the jack_deactivate() function (in closeStream()) will return.
2160 static void *jackCloseStream( void *ptr )
2162 CallbackInfo *info = (CallbackInfo *) ptr;
2163 RtApiJack *object = (RtApiJack *) info->object;
// Close the stream from this helper thread, then terminate the thread.
2165 object->closeStream();
2167 pthread_exit( NULL );
// JACK shutdown callback (registered via jack_on_shutdown). Spawns a
// detached-style helper thread to close the stream; see the comment
// above jackCloseStream() for why this cannot be done inline.
2169 static void jackShutdown( void *infoPointer )
2171 CallbackInfo *info = (CallbackInfo *) infoPointer;
2172 RtApiJack *object = (RtApiJack *) info->object;
2174 // Check current stream state. If stopped, then we'll assume this
2175 // was called as a result of a call to RtApiJack::stopStream (the
2176 // deactivation of a client handle causes this function to be called).
2177 // If not, we'll assume the Jack server is shutting down or some
2178 // other problem occurred and we should close the stream.
2179 if ( object->isStreamRunning() == false ) return;
// NOTE(review): the spawned thread is never joined here — it exits
// on its own via pthread_exit() in jackCloseStream().
2181 ThreadHandle threadId;
2182 pthread_create( &threadId, NULL, jackCloseStream, info );
2183 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
// JACK xrun callback: record an under/overflow for whichever
// directions are active so callbackEvent() can report it to the
// user callback via RtAudioStreamStatus flags.
2186 static int jackXrun( void *infoPointer )
2188 JackHandle *handle = *((JackHandle **) infoPointer);
2190 if ( handle->ports[0] ) handle->xrun[0] = true;
2191 if ( handle->ports[1] ) handle->xrun[1] = true;
// Open (or extend to duplex) a JACK stream for the given device/mode.
// Connects to the JACK server, validates channel counts and sample
// rate (JACK's rate is fixed; a mismatch is an error), allocates
// user/device buffers and the JackHandle, registers the client's
// ports, and installs the process/xrun/shutdown callbacks. Called
// once for OUTPUT and once for INPUT when opening a duplex stream.
// Returns FAILURE (after the "error:" cleanup section at the bottom
// frees any partial allocations) on any problem.
2196 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2197 unsigned int firstChannel, unsigned int sampleRate,
2198 RtAudioFormat format, unsigned int *bufferSize,
2199 RtAudio::StreamOptions *options )
2201 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2203 // Look for jack server and try to become a client (only do once per stream).
2204 jack_client_t *client = 0;
2205 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2206 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2207 jack_status_t *status = NULL;
// Honor a caller-supplied client name when one was given in options.
2208 if ( options && !options->streamName.empty() )
2209 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2211 client = jack_client_open( "RtApiJack", jackoptions, status );
2212 if ( client == 0 ) {
2213 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2214 error( RtAudioError::WARNING );
2219 // The handle must have been created on an earlier pass.
2220 client = handle->client;
// Resolve the device index to a JACK client name by grouping port
// names on their prefix before the first colon (same scheme as
// getDeviceCount/getDeviceInfo).
2224 std::string port, previousPort, deviceName;
2225 unsigned int nPorts = 0, nDevices = 0;
2226 ports = jack_get_ports( client, NULL, NULL, 0 );
2228 // Parse the port names up to the first colon (:).
2231 port = (char *) ports[ nPorts ];
2232 iColon = port.find(":");
2233 if ( iColon != std::string::npos ) {
2234 port = port.substr( 0, iColon );
2235 if ( port != previousPort ) {
2236 if ( nDevices == device ) deviceName = port;
2238 previousPort = port;
2241 } while ( ports[++nPorts] );
2245 if ( device >= nDevices ) {
2246 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2250 // Count the available ports containing the client name as device
2251 // channels. Jack "input ports" equal RtAudio output channels.
2252 unsigned int nChannels = 0;
2253 unsigned long flag = JackPortIsInput;
2254 if ( mode == INPUT ) flag = JackPortIsOutput;
2255 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2257 while ( ports[ nChannels ] ) nChannels++;
2261 // Compare the jack ports for specified client to the requested number of channels.
2262 if ( nChannels < (channels + firstChannel) ) {
2263 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2264 errorText_ = errorStream_.str();
2268 // Check the jack server sample rate.
2269 unsigned int jackRate = jack_get_sample_rate( client );
2270 if ( sampleRate != jackRate ) {
2271 jack_client_close( client );
2272 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2273 errorText_ = errorStream_.str();
2276 stream_.sampleRate = jackRate;
2278 // Get the latency of the JACK port.
2279 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2280 if ( ports[ firstChannel ] ) {
2282 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2283 // the range (usually the min and max are equal)
2284 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2285 // get the latency range
2286 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2287 // be optimistic, use the min!
2288 stream_.latency[mode] = latrange.min;
2289 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2293 // The jack server always uses 32-bit floating-point data.
2294 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2295 stream_.userFormat = format;
2297 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2298 else stream_.userInterleaved = true;
2300 // Jack always uses non-interleaved buffers.
2301 stream_.deviceInterleaved[mode] = false;
2303 // Jack always provides host byte-ordered data.
2304 stream_.doByteSwap[mode] = false;
2306 // Get the buffer size. The buffer size and number of buffers
2307 // (periods) is set when the jack server is started.
2308 stream_.bufferSize = (int) jack_get_buffer_size( client );
2309 *bufferSize = stream_.bufferSize;
2311 stream_.nDeviceChannels[mode] = channels;
2312 stream_.nUserChannels[mode] = channels;
2314 // Set flags for buffer conversion.
2315 stream_.doConvertBuffer[mode] = false;
2316 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2317 stream_.doConvertBuffer[mode] = true;
2318 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2319 stream_.nUserChannels[mode] > 1 )
2320 stream_.doConvertBuffer[mode] = true;
2322 // Allocate our JackHandle structure for the stream.
2323 if ( handle == 0 ) {
2325 handle = new JackHandle;
2327 catch ( std::bad_alloc& ) {
2328 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2332 if ( pthread_cond_init(&handle->condition, NULL) ) {
2333 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2336 stream_.apiHandle = (void *) handle;
2337 handle->client = client;
2339 handle->deviceName[mode] = deviceName;
2341 // Allocate necessary internal buffers.
2342 unsigned long bufferBytes;
2343 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2344 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2345 if ( stream_.userBuffer[mode] == NULL ) {
2346 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2350 if ( stream_.doConvertBuffer[mode] ) {
// For duplex streams, reuse the existing device buffer if it is
// already at least as large as this direction needs.
2352 bool makeBuffer = true;
2353 if ( mode == OUTPUT )
2354 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2355 else { // mode == INPUT
2356 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2357 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2358 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2359 if ( bufferBytes < bytesOut ) makeBuffer = false;
2364 bufferBytes *= *bufferSize;
2365 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2366 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2367 if ( stream_.deviceBuffer == NULL ) {
2368 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2374 // Allocate memory for the Jack ports (channels) identifiers.
2375 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2376 if ( handle->ports[mode] == NULL ) {
2377 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2381 stream_.device[mode] = device;
2382 stream_.channelOffset[mode] = firstChannel;
2383 stream_.state = STREAM_STOPPED;
2384 stream_.callbackInfo.object = (void *) this;
2386 if ( stream_.mode == OUTPUT && mode == INPUT )
2387 // We had already set up the stream for output.
2388 stream_.mode = DUPLEX;
2390 stream_.mode = mode;
// Install JACK callbacks: per-block processing, xrun notification,
// and server-shutdown handling.
2391 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2392 jack_set_xrun_callback( handle->client, jackXrun, (void *) &stream_.apiHandle );
2393 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2396 // Register our ports.
2398 if ( mode == OUTPUT ) {
2399 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2400 snprintf( label, 64, "outport %d", i );
2401 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2402 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2406 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2407 snprintf( label, 64, "inport %d", i );
2408 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2409 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2413 // Setup the buffer conversion information structure. We don't use
2414 // buffers to do channel offsets, so we override that parameter
2416 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2418 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
// Error cleanup: release the condition variable, JACK client, port
// arrays, handle, and any buffers allocated above.
2424 pthread_cond_destroy( &handle->condition );
2425 jack_client_close( handle->client );
2427 if ( handle->ports[0] ) free( handle->ports[0] );
2428 if ( handle->ports[1] ) free( handle->ports[1] );
2431 stream_.apiHandle = 0;
2434 for ( int i=0; i<2; i++ ) {
2435 if ( stream_.userBuffer[i] ) {
2436 free( stream_.userBuffer[i] );
2437 stream_.userBuffer[i] = 0;
2441 if ( stream_.deviceBuffer ) {
2442 free( stream_.deviceBuffer );
2443 stream_.deviceBuffer = 0;
// Close the open stream: deactivate (if running) and close the JACK
// client, free the port arrays and JackHandle, free user/device
// buffers, and reset the stream state to CLOSED. Warns if no stream
// is open.
2449 void RtApiJack :: closeStream( void )
2451 if ( stream_.state == STREAM_CLOSED ) {
2452 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2453 error( RtAudioError::WARNING );
2457 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2460 if ( stream_.state == STREAM_RUNNING )
2461 jack_deactivate( handle->client );
// Closing the client also unregisters our ports server-side.
2463 jack_client_close( handle->client );
2467 if ( handle->ports[0] ) free( handle->ports[0] );
2468 if ( handle->ports[1] ) free( handle->ports[1] );
2469 pthread_cond_destroy( &handle->condition );
2471 stream_.apiHandle = 0;
2474 for ( int i=0; i<2; i++ ) {
2475 if ( stream_.userBuffer[i] ) {
2476 free( stream_.userBuffer[i] );
2477 stream_.userBuffer[i] = 0;
2481 if ( stream_.deviceBuffer ) {
2482 free( stream_.deviceBuffer );
2483 stream_.deviceBuffer = 0;
2486 stream_.mode = UNINITIALIZED;
2487 stream_.state = STREAM_CLOSED;
// Start the stream: activate the JACK client and (unless autoconnect
// was disabled) wire our registered ports to the target device's
// ports, honoring the channel offsets chosen at open time. Raises a
// SYSTEM_ERROR if activation or any connection fails.
2490 void RtApiJack :: startStream( void )
2493 if ( stream_.state == STREAM_RUNNING ) {
2494 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2495 error( RtAudioError::WARNING );
2499 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2500 int result = jack_activate( handle->client );
2502 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2508 // Get the list of available ports.
2509 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
// Our playback ports connect to the device's physical *input* ports.
2511 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2512 if ( ports == NULL) {
2513 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2517 // Now make the port connections. Since RtAudio wasn't designed to
2518 // allow the user to select particular channels of a device, we'll
2519 // just open the first "nChannels" ports with offset.
2520 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2522 if ( ports[ stream_.channelOffset[0] + i ] )
2523 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2526 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2533 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
// Our capture ports connect from the device's *output* ports.
2535 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2536 if ( ports == NULL) {
2537 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2541 // Now make the port connections. See note above.
2542 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2544 if ( ports[ stream_.channelOffset[1] + i ] )
2545 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2548 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
// Reset drain tracking so a previous stop doesn't affect this run.
2555 handle->drainCounter = 0;
2556 handle->internalDrain = false;
2557 stream_.state = STREAM_RUNNING;
2560 if ( result == 0 ) return;
2561 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream gracefully. For output/duplex streams, request a
// drain (drainCounter = 2) and block on the handle's condition
// variable until callbackEvent() signals the drain has completed,
// then deactivate the JACK client.
2564 void RtApiJack :: stopStream( void )
2567 if ( stream_.state == STREAM_STOPPED ) {
2568 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2569 error( RtAudioError::WARNING );
2573 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2574 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Only initiate a drain if one isn't already in progress.
2576 if ( handle->drainCounter == 0 ) {
2577 handle->drainCounter = 2;
2578 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2582 jack_deactivate( handle->client );
2583 stream_.state = STREAM_STOPPED;
// Abort the stream: set drainCounter so the callback immediately
// zeros its output and winds down, without waiting for a full drain
// as stopStream() does.
2586 void RtApiJack :: abortStream( void )
2589 if ( stream_.state == STREAM_STOPPED ) {
2590 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2591 error( RtAudioError::WARNING );
2595 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2596 handle->drainCounter = 2;
2601 // This function will be called by a spawned thread when the user
2602 // callback function signals that the stream should be stopped or
2603 // aborted. It is necessary to handle it this way because the
2604 // callbackEvent() function must return before the jack_deactivate()
2605 // function will return.
2606 static void *jackStopStream( void *ptr )
2608 CallbackInfo *info = (CallbackInfo *) ptr;
2609 RtApiJack *object = (RtApiJack *) info->object;
// Stop the stream from this helper thread, then terminate the thread.
2611 object->stopStream();
2612 pthread_exit( NULL );
// Per-block JACK processing, invoked from jackCallbackHandler().
// Invokes the user callback to get/put audio data, handles drain
// requests (zeroing output and signaling stopStream), performs any
// needed interleave/format conversion, and copies data between the
// user/device buffers and the per-channel JACK port buffers.
// Returns SUCCESS normally; callers treat false as a fatal error.
//
// Fix: the two error messages below previously said
// "RtApiCore::callbackEvent()" — a copy-paste from the CoreAudio
// backend — which misattributed JACK errors to CoreAudio.
2615 bool RtApiJack :: callbackEvent( unsigned long nframes )
2617 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2618 if ( stream_.state == STREAM_CLOSED ) {
2619 errorText_ = "RtApiJack::callbackEvent(): the stream is closed ... this shouldn't happen!";
2620 error( RtAudioError::WARNING );
// JACK's buffer size is fixed at server start; a change mid-stream
// invalidates all of our allocated buffers.
2623 if ( stream_.bufferSize != nframes ) {
2624 errorText_ = "RtApiJack::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2625 error( RtAudioError::WARNING );
2629 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2630 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2632 // Check if we were draining the stream and signal is finished.
2633 if ( handle->drainCounter > 3 ) {
2634 ThreadHandle threadId;
2636 stream_.state = STREAM_STOPPING;
// Internal drain (callback asked to stop): spawn a thread to call
// stopStream(); external drain: wake the blocked stopStream() caller.
2637 if ( handle->internalDrain == true )
2638 pthread_create( &threadId, NULL, jackStopStream, info );
2640 pthread_cond_signal( &handle->condition );
2644 // Invoke user callback first, to get fresh output data.
2645 if ( handle->drainCounter == 0 ) {
2646 RtAudioCallback callback = (RtAudioCallback) info->callback;
2647 double streamTime = getStreamTime();
2648 RtAudioStreamStatus status = 0;
// Report and clear any xruns recorded by jackXrun() since last block.
2649 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2650 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2651 handle->xrun[0] = false;
2653 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2654 status |= RTAUDIO_INPUT_OVERFLOW;
2655 handle->xrun[1] = false;
2657 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2658 stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 = abort immediately; 1 = drain output then stop.
2659 if ( cbReturnValue == 2 ) {
2660 stream_.state = STREAM_STOPPING;
2661 handle->drainCounter = 2;
2663 pthread_create( &id, NULL, jackStopStream, info );
2666 else if ( cbReturnValue == 1 ) {
2667 handle->drainCounter = 1;
2668 handle->internalDrain = true;
2672 jack_default_audio_sample_t *jackbuffer;
2673 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2674 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2676 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2678 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2679 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2680 memset( jackbuffer, 0, bufferBytes );
2684 else if ( stream_.doConvertBuffer[0] ) {
// Convert user format/interleaving into the non-interleaved device
// buffer, then scatter one channel per JACK port.
2686 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2688 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2689 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2690 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2693 else { // no buffer conversion
2694 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2695 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2696 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2701 // Don't bother draining input
2702 if ( handle->drainCounter ) {
2703 handle->drainCounter++;
2707 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2709 if ( stream_.doConvertBuffer[1] ) {
// Gather one channel per JACK port into the device buffer, then
// convert to the user's format/interleaving.
2710 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2711 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2712 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2714 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2716 else { // no buffer conversion
2717 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2718 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2719 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2725 RtApi::tickStreamTime();
2728 //******************** End of __UNIX_JACK__ *********************//
2731 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2733 // The ASIO API is designed around a callback scheme, so this
2734 // implementation is similar to that used for OS-X CoreAudio and Linux
2735 // Jack. The primary constraint with ASIO is that it only allows
2736 // access to a single driver at a time. Thus, it is not possible to
2737 // have more than one simultaneous RtAudio stream.
2739 // This implementation also requires a number of external ASIO files
2740 // and a few global variables. The ASIO callback scheme does not
2741 // allow for the passing of user data, so we must create a global
2742 // pointer to our callbackInfo structure.
2744 // On unix systems, we make use of a pthread condition variable.
2745 // Since there is no equivalent in Windows, I hacked something based
2746 // on information found in
2747 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2749 #include "asiosys.h"
2751 #include "iasiothiscallresolver.h"
2752 #include "asiodrivers.h"
// Globals required by the ASIO callback scheme, which cannot carry
// user data through its callbacks (see section comment above).
2755 static AsioDrivers drivers;
2756 static ASIOCallbacks asioCallbacks;
2757 static ASIODriverInfo driverInfo;
2758 static CallbackInfo *asioCallbackInfo;
// Set by the ASIO driver when an under/overrun occurs; reported to
// the user callback and then cleared.
2759 static bool asioXRun;
2762 int drainCounter; // Tracks callback counts when draining
2763 bool internalDrain; // Indicates if stop is initiated from callback or not.
// ASIO buffer descriptors, allocated when the stream is opened.
2764 ASIOBufferInfo *bufferInfos;
// Default-construct with no drain in progress and no buffers.
2768 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2771 // Function declarations (definitions at end of section)
2772 static const char* getAsioErrorString( ASIOError result );
2773 static void sampleRateChanged( ASIOSampleRate sRate );
2774 static long asioMessages( long selector, long value, void* message, double* opt );
// Constructor: initialize COM for apartment threading (required by
// ASIO), reset any loaded driver, and prepare the driver info struct.
2776 RtApiAsio :: RtApiAsio()
2778 // ASIO cannot run on a multi-threaded apartment. You can call
2779 // CoInitialize beforehand, but it must be for apartment threading
2780 // (in which case, CoInitialize will return S_FALSE here).
2781 coInitialized_ = false;
2782 HRESULT hr = CoInitialize( NULL );
2784 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2785 error( RtAudioError::WARNING );
// Remember that we initialized COM so the destructor can balance it.
2787 coInitialized_ = true;
2789 drivers.removeCurrentDriver();
2790 driverInfo.asioVersion = 2;
2792 // See note in DirectSound implementation about GetDesktopWindow().
2793 driverInfo.sysRef = GetForegroundWindow();
// Destructor: close any open stream, then balance the constructor's
// CoInitialize() if it succeeded.
2796 RtApiAsio :: ~RtApiAsio()
2798 if ( stream_.state != STREAM_CLOSED ) closeStream();
2799 if ( coInitialized_ ) CoUninitialize();
// Number of installed ASIO drivers, as reported by the driver list.
2802 unsigned int RtApiAsio :: getDeviceCount( void )
2804 return (unsigned int) drivers.asioGetNumDev();
2807 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2809 RtAudio::DeviceInfo info;
2810 info.probed = false;
2813 unsigned int nDevices = getDeviceCount();
2814 if ( nDevices == 0 ) {
2815 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2816 error( RtAudioError::INVALID_USE );
2820 if ( device >= nDevices ) {
2821 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2822 error( RtAudioError::INVALID_USE );
2826 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2827 if ( stream_.state != STREAM_CLOSED ) {
2828 if ( device >= devices_.size() ) {
2829 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2830 error( RtAudioError::WARNING );
2833 return devices_[ device ];
2836 char driverName[32];
2837 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2838 if ( result != ASE_OK ) {
2839 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2840 errorText_ = errorStream_.str();
2841 error( RtAudioError::WARNING );
2845 info.name = driverName;
2847 if ( !drivers.loadDriver( driverName ) ) {
2848 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2849 errorText_ = errorStream_.str();
2850 error( RtAudioError::WARNING );
2854 result = ASIOInit( &driverInfo );
2855 if ( result != ASE_OK ) {
2856 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2857 errorText_ = errorStream_.str();
2858 error( RtAudioError::WARNING );
2862 // Determine the device channel information.
2863 long inputChannels, outputChannels;
2864 result = ASIOGetChannels( &inputChannels, &outputChannels );
2865 if ( result != ASE_OK ) {
2866 drivers.removeCurrentDriver();
2867 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2868 errorText_ = errorStream_.str();
2869 error( RtAudioError::WARNING );
2873 info.outputChannels = outputChannels;
2874 info.inputChannels = inputChannels;
2875 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2876 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2878 // Determine the supported sample rates.
2879 info.sampleRates.clear();
2880 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2881 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2882 if ( result == ASE_OK ) {
2883 info.sampleRates.push_back( SAMPLE_RATES[i] );
2885 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2886 info.preferredSampleRate = SAMPLE_RATES[i];
2890 // Determine supported data types ... just check first channel and assume rest are the same.
2891 ASIOChannelInfo channelInfo;
2892 channelInfo.channel = 0;
2893 channelInfo.isInput = true;
2894 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2895 result = ASIOGetChannelInfo( &channelInfo );
2896 if ( result != ASE_OK ) {
2897 drivers.removeCurrentDriver();
2898 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2899 errorText_ = errorStream_.str();
2900 error( RtAudioError::WARNING );
2904 info.nativeFormats = 0;
2905 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2906 info.nativeFormats |= RTAUDIO_SINT16;
2907 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2908 info.nativeFormats |= RTAUDIO_SINT32;
2909 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2910 info.nativeFormats |= RTAUDIO_FLOAT32;
2911 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2912 info.nativeFormats |= RTAUDIO_FLOAT64;
2913 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2914 info.nativeFormats |= RTAUDIO_SINT24;
2916 if ( info.outputChannels > 0 )
2917 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2918 if ( info.inputChannels > 0 )
2919 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2922 drivers.removeCurrentDriver();
2926 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2928 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2929 object->callbackEvent( index );
2932 void RtApiAsio :: saveDeviceInfo( void )
2936 unsigned int nDevices = getDeviceCount();
2937 devices_.resize( nDevices );
2938 for ( unsigned int i=0; i<nDevices; i++ )
2939 devices_[i] = getDeviceInfo( i );
2942 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2943 unsigned int firstChannel, unsigned int sampleRate,
2944 RtAudioFormat format, unsigned int *bufferSize,
2945 RtAudio::StreamOptions *options )
2946 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2948 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2950 // For ASIO, a duplex stream MUST use the same driver.
2951 if ( isDuplexInput && stream_.device[0] != device ) {
2952 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2956 char driverName[32];
2957 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2958 if ( result != ASE_OK ) {
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2960 errorText_ = errorStream_.str();
2964 // Only load the driver once for duplex stream.
2965 if ( !isDuplexInput ) {
2966 // The getDeviceInfo() function will not work when a stream is open
2967 // because ASIO does not allow multiple devices to run at the same
2968 // time. Thus, we'll probe the system before opening a stream and
2969 // save the results for use by getDeviceInfo().
2970 this->saveDeviceInfo();
2972 if ( !drivers.loadDriver( driverName ) ) {
2973 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2974 errorText_ = errorStream_.str();
2978 result = ASIOInit( &driverInfo );
2979 if ( result != ASE_OK ) {
2980 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2981 errorText_ = errorStream_.str();
2986 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2987 bool buffersAllocated = false;
2988 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2989 unsigned int nChannels;
2992 // Check the device channel count.
2993 long inputChannels, outputChannels;
2994 result = ASIOGetChannels( &inputChannels, &outputChannels );
2995 if ( result != ASE_OK ) {
2996 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2997 errorText_ = errorStream_.str();
3001 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
3002 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
3003 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
3004 errorText_ = errorStream_.str();
3007 stream_.nDeviceChannels[mode] = channels;
3008 stream_.nUserChannels[mode] = channels;
3009 stream_.channelOffset[mode] = firstChannel;
3011 // Verify the sample rate is supported.
3012 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
3013 if ( result != ASE_OK ) {
3014 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
3015 errorText_ = errorStream_.str();
3019 // Get the current sample rate
3020 ASIOSampleRate currentRate;
3021 result = ASIOGetSampleRate( ¤tRate );
3022 if ( result != ASE_OK ) {
3023 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
3024 errorText_ = errorStream_.str();
3028 // Set the sample rate only if necessary
3029 if ( currentRate != sampleRate ) {
3030 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
3031 if ( result != ASE_OK ) {
3032 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
3033 errorText_ = errorStream_.str();
3038 // Determine the driver data type.
3039 ASIOChannelInfo channelInfo;
3040 channelInfo.channel = 0;
3041 if ( mode == OUTPUT ) channelInfo.isInput = false;
3042 else channelInfo.isInput = true;
3043 result = ASIOGetChannelInfo( &channelInfo );
3044 if ( result != ASE_OK ) {
3045 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
3046 errorText_ = errorStream_.str();
3050 // Assuming WINDOWS host is always little-endian.
3051 stream_.doByteSwap[mode] = false;
3052 stream_.userFormat = format;
3053 stream_.deviceFormat[mode] = 0;
3054 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3055 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3056 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3058 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3059 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3060 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3062 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3063 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3064 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3066 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3067 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3068 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3070 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3071 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3072 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3075 if ( stream_.deviceFormat[mode] == 0 ) {
3076 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3077 errorText_ = errorStream_.str();
3081 // Set the buffer size. For a duplex stream, this will end up
3082 // setting the buffer size based on the input constraints, which
3084 long minSize, maxSize, preferSize, granularity;
3085 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3086 if ( result != ASE_OK ) {
3087 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3088 errorText_ = errorStream_.str();
3092 if ( isDuplexInput ) {
3093 // When this is the duplex input (output was opened before), then we have to use the same
3094 // buffersize as the output, because it might use the preferred buffer size, which most
3095 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3096 // So instead of throwing an error, make them equal. The caller uses the reference
3097 // to the "bufferSize" param as usual to set up processing buffers.
3099 *bufferSize = stream_.bufferSize;
3102 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3103 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3104 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3105 else if ( granularity == -1 ) {
3106 // Make sure bufferSize is a power of two.
3107 int log2_of_min_size = 0;
3108 int log2_of_max_size = 0;
3110 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3111 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3112 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3115 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3116 int min_delta_num = log2_of_min_size;
3118 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3119 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3120 if (current_delta < min_delta) {
3121 min_delta = current_delta;
3126 *bufferSize = ( (unsigned int)1 << min_delta_num );
3127 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3128 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3130 else if ( granularity != 0 ) {
3131 // Set to an even multiple of granularity, rounding up.
3132 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3137 // we don't use it anymore, see above!
3138 // Just left it here for the case...
3139 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3140 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3145 stream_.bufferSize = *bufferSize;
3146 stream_.nBuffers = 2;
3148 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3149 else stream_.userInterleaved = true;
3151 // ASIO always uses non-interleaved buffers.
3152 stream_.deviceInterleaved[mode] = false;
3154 // Allocate, if necessary, our AsioHandle structure for the stream.
3155 if ( handle == 0 ) {
3157 handle = new AsioHandle;
3159 catch ( std::bad_alloc& ) {
3160 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3163 handle->bufferInfos = 0;
3165 // Create a manual-reset event.
3166 handle->condition = CreateEvent( NULL, // no security
3167 TRUE, // manual-reset
3168 FALSE, // non-signaled initially
3170 stream_.apiHandle = (void *) handle;
3173 // Create the ASIO internal buffers. Since RtAudio sets up input
3174 // and output separately, we'll have to dispose of previously
3175 // created output buffers for a duplex stream.
3176 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3177 ASIODisposeBuffers();
3178 if ( handle->bufferInfos ) free( handle->bufferInfos );
3181 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3183 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3184 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3185 if ( handle->bufferInfos == NULL ) {
3186 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3187 errorText_ = errorStream_.str();
3191 ASIOBufferInfo *infos;
3192 infos = handle->bufferInfos;
3193 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3194 infos->isInput = ASIOFalse;
3195 infos->channelNum = i + stream_.channelOffset[0];
3196 infos->buffers[0] = infos->buffers[1] = 0;
3198 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3199 infos->isInput = ASIOTrue;
3200 infos->channelNum = i + stream_.channelOffset[1];
3201 infos->buffers[0] = infos->buffers[1] = 0;
3204 // prepare for callbacks
3205 stream_.sampleRate = sampleRate;
3206 stream_.device[mode] = device;
3207 stream_.mode = isDuplexInput ? DUPLEX : mode;
3209 // store this class instance before registering callbacks, that are going to use it
3210 asioCallbackInfo = &stream_.callbackInfo;
3211 stream_.callbackInfo.object = (void *) this;
3213 // Set up the ASIO callback structure and create the ASIO data buffers.
3214 asioCallbacks.bufferSwitch = &bufferSwitch;
3215 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3216 asioCallbacks.asioMessage = &asioMessages;
3217 asioCallbacks.bufferSwitchTimeInfo = NULL;
3218 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3219 if ( result != ASE_OK ) {
3220 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3221 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3222 // in that case, let's be naïve and try that instead
3223 *bufferSize = preferSize;
3224 stream_.bufferSize = *bufferSize;
3225 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3228 if ( result != ASE_OK ) {
3229 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3230 errorText_ = errorStream_.str();
3233 buffersAllocated = true;
3234 stream_.state = STREAM_STOPPED;
3236 // Set flags for buffer conversion.
3237 stream_.doConvertBuffer[mode] = false;
3238 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3239 stream_.doConvertBuffer[mode] = true;
3240 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3241 stream_.nUserChannels[mode] > 1 )
3242 stream_.doConvertBuffer[mode] = true;
3244 // Allocate necessary internal buffers
3245 unsigned long bufferBytes;
3246 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3247 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3248 if ( stream_.userBuffer[mode] == NULL ) {
3249 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3253 if ( stream_.doConvertBuffer[mode] ) {
3255 bool makeBuffer = true;
3256 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3257 if ( isDuplexInput && stream_.deviceBuffer ) {
3258 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3259 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3263 bufferBytes *= *bufferSize;
3264 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3265 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3266 if ( stream_.deviceBuffer == NULL ) {
3267 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3273 // Determine device latencies
3274 long inputLatency, outputLatency;
3275 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3276 if ( result != ASE_OK ) {
3277 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3278 errorText_ = errorStream_.str();
3279 error( RtAudioError::WARNING); // warn but don't fail
3282 stream_.latency[0] = outputLatency;
3283 stream_.latency[1] = inputLatency;
3286 // Setup the buffer conversion information structure. We don't use
3287 // buffers to do channel offsets, so we override that parameter
3289 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3294 if ( !isDuplexInput ) {
3295 // the cleanup for error in the duplex input, is done by RtApi::openStream
3296 // So we clean up for single channel only
3298 if ( buffersAllocated )
3299 ASIODisposeBuffers();
3301 drivers.removeCurrentDriver();
3304 CloseHandle( handle->condition );
3305 if ( handle->bufferInfos )
3306 free( handle->bufferInfos );
3309 stream_.apiHandle = 0;
3313 if ( stream_.userBuffer[mode] ) {
3314 free( stream_.userBuffer[mode] );
3315 stream_.userBuffer[mode] = 0;
3318 if ( stream_.deviceBuffer ) {
3319 free( stream_.deviceBuffer );
3320 stream_.deviceBuffer = 0;
3325 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3327 void RtApiAsio :: closeStream()
3329 if ( stream_.state == STREAM_CLOSED ) {
3330 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3331 error( RtAudioError::WARNING );
3335 if ( stream_.state == STREAM_RUNNING ) {
3336 stream_.state = STREAM_STOPPED;
3339 ASIODisposeBuffers();
3340 drivers.removeCurrentDriver();
3342 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3344 CloseHandle( handle->condition );
3345 if ( handle->bufferInfos )
3346 free( handle->bufferInfos );
3348 stream_.apiHandle = 0;
3351 for ( int i=0; i<2; i++ ) {
3352 if ( stream_.userBuffer[i] ) {
3353 free( stream_.userBuffer[i] );
3354 stream_.userBuffer[i] = 0;
3358 if ( stream_.deviceBuffer ) {
3359 free( stream_.deviceBuffer );
3360 stream_.deviceBuffer = 0;
3363 stream_.mode = UNINITIALIZED;
3364 stream_.state = STREAM_CLOSED;
3367 bool stopThreadCalled = false;
3369 void RtApiAsio :: startStream()
3372 if ( stream_.state == STREAM_RUNNING ) {
3373 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3374 error( RtAudioError::WARNING );
3378 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3379 ASIOError result = ASIOStart();
3380 if ( result != ASE_OK ) {
3381 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3382 errorText_ = errorStream_.str();
3386 handle->drainCounter = 0;
3387 handle->internalDrain = false;
3388 ResetEvent( handle->condition );
3389 stream_.state = STREAM_RUNNING;
3393 stopThreadCalled = false;
3395 if ( result == ASE_OK ) return;
3396 error( RtAudioError::SYSTEM_ERROR );
3399 void RtApiAsio :: stopStream()
3402 if ( stream_.state == STREAM_STOPPED ) {
3403 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3404 error( RtAudioError::WARNING );
3408 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3409 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3410 if ( handle->drainCounter == 0 ) {
3411 handle->drainCounter = 2;
3412 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3416 stream_.state = STREAM_STOPPED;
3418 ASIOError result = ASIOStop();
3419 if ( result != ASE_OK ) {
3420 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3421 errorText_ = errorStream_.str();
3424 if ( result == ASE_OK ) return;
3425 error( RtAudioError::SYSTEM_ERROR );
3428 void RtApiAsio :: abortStream()
3431 if ( stream_.state == STREAM_STOPPED ) {
3432 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3433 error( RtAudioError::WARNING );
3437 // The following lines were commented-out because some behavior was
3438 // noted where the device buffers need to be zeroed to avoid
3439 // continuing sound, even when the device buffers are completely
3440 // disposed. So now, calling abort is the same as calling stop.
3441 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3442 // handle->drainCounter = 2;
3446 // This function will be called by a spawned thread when the user
3447 // callback function signals that the stream should be stopped or
3448 // aborted. It is necessary to handle it this way because the
3449 // callbackEvent() function must return before the ASIOStop()
3450 // function will return.
3451 static unsigned __stdcall asioStopStream( void *ptr )
3453 CallbackInfo *info = (CallbackInfo *) ptr;
3454 RtApiAsio *object = (RtApiAsio *) info->object;
3456 object->stopStream();
3461 bool RtApiAsio :: callbackEvent( long bufferIndex )
3463 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3464 if ( stream_.state == STREAM_CLOSED ) {
3465 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3466 error( RtAudioError::WARNING );
3470 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3471 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3473 // Check if we were draining the stream and signal if finished.
3474 if ( handle->drainCounter > 3 ) {
3476 stream_.state = STREAM_STOPPING;
3477 if ( handle->internalDrain == false )
3478 SetEvent( handle->condition );
3479 else { // spawn a thread to stop the stream
3481 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3482 &stream_.callbackInfo, 0, &threadId );
3487 // Invoke user callback to get fresh output data UNLESS we are
3489 if ( handle->drainCounter == 0 ) {
3490 RtAudioCallback callback = (RtAudioCallback) info->callback;
3491 double streamTime = getStreamTime();
3492 RtAudioStreamStatus status = 0;
3493 if ( stream_.mode != INPUT && asioXRun == true ) {
3494 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3497 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3498 status |= RTAUDIO_INPUT_OVERFLOW;
3501 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3502 stream_.bufferSize, streamTime, status, info->userData );
3503 if ( cbReturnValue == 2 ) {
3504 stream_.state = STREAM_STOPPING;
3505 handle->drainCounter = 2;
3507 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3508 &stream_.callbackInfo, 0, &threadId );
3511 else if ( cbReturnValue == 1 ) {
3512 handle->drainCounter = 1;
3513 handle->internalDrain = true;
3517 unsigned int nChannels, bufferBytes, i, j;
3518 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3519 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3521 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3523 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3525 for ( i=0, j=0; i<nChannels; i++ ) {
3526 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3527 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3531 else if ( stream_.doConvertBuffer[0] ) {
3533 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3534 if ( stream_.doByteSwap[0] )
3535 byteSwapBuffer( stream_.deviceBuffer,
3536 stream_.bufferSize * stream_.nDeviceChannels[0],
3537 stream_.deviceFormat[0] );
3539 for ( i=0, j=0; i<nChannels; i++ ) {
3540 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3541 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3542 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3548 if ( stream_.doByteSwap[0] )
3549 byteSwapBuffer( stream_.userBuffer[0],
3550 stream_.bufferSize * stream_.nUserChannels[0],
3551 stream_.userFormat );
3553 for ( i=0, j=0; i<nChannels; i++ ) {
3554 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3555 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3556 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3562 // Don't bother draining input
3563 if ( handle->drainCounter ) {
3564 handle->drainCounter++;
3568 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3570 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3572 if (stream_.doConvertBuffer[1]) {
3574 // Always interleave ASIO input data.
3575 for ( i=0, j=0; i<nChannels; i++ ) {
3576 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3577 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3578 handle->bufferInfos[i].buffers[bufferIndex],
3582 if ( stream_.doByteSwap[1] )
3583 byteSwapBuffer( stream_.deviceBuffer,
3584 stream_.bufferSize * stream_.nDeviceChannels[1],
3585 stream_.deviceFormat[1] );
3586 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3590 for ( i=0, j=0; i<nChannels; i++ ) {
3591 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3592 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3593 handle->bufferInfos[i].buffers[bufferIndex],
3598 if ( stream_.doByteSwap[1] )
3599 byteSwapBuffer( stream_.userBuffer[1],
3600 stream_.bufferSize * stream_.nUserChannels[1],
3601 stream_.userFormat );
3606 // The following call was suggested by Malte Clasen. While the API
3607 // documentation indicates it should not be required, some device
3608 // drivers apparently do not function correctly without it.
3611 RtApi::tickStreamTime();
3615 static void sampleRateChanged( ASIOSampleRate sRate )
3617 // The ASIO documentation says that this usually only happens during
3618 // external sync. Audio processing is not stopped by the driver,
3619 // actual sample rate might not have even changed, maybe only the
3620 // sample rate status of an AES/EBU or S/PDIF digital input at the
3623 RtApi *object = (RtApi *) asioCallbackInfo->object;
3625 object->stopStream();
3627 catch ( RtAudioError &exception ) {
3628 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3632 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3635 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3639 switch( selector ) {
3640 case kAsioSelectorSupported:
3641 if ( value == kAsioResetRequest
3642 || value == kAsioEngineVersion
3643 || value == kAsioResyncRequest
3644 || value == kAsioLatenciesChanged
3645 // The following three were added for ASIO 2.0, you don't
3646 // necessarily have to support them.
3647 || value == kAsioSupportsTimeInfo
3648 || value == kAsioSupportsTimeCode
3649 || value == kAsioSupportsInputMonitor)
3652 case kAsioResetRequest:
3653 // Defer the task and perform the reset of the driver during the
3654 // next "safe" situation. You cannot reset the driver right now,
3655 // as this code is called from the driver. Reset the driver is
3656 // done by completely destruct is. I.e. ASIOStop(),
3657 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3659 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3662 case kAsioResyncRequest:
3663 // This informs the application that the driver encountered some
3664 // non-fatal data loss. It is used for synchronization purposes
3665 // of different media. Added mainly to work around the Win16Mutex
3666 // problems in Windows 95/98 with the Windows Multimedia system,
3667 // which could lose data because the Mutex was held too long by
3668 // another thread. However a driver can issue it in other
3670 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3674 case kAsioLatenciesChanged:
3675 // This will inform the host application that the drivers were
3676 // latencies changed. Beware, it this does not mean that the
3677 // buffer sizes have changed! You might need to update internal
3679 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3682 case kAsioEngineVersion:
3683 // Return the supported ASIO version of the host application. If
3684 // a host application does not implement this selector, ASIO 1.0
3685 // is assumed by the driver.
3688 case kAsioSupportsTimeInfo:
3689 // Informs the driver whether the
3690 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3691 // For compatibility with ASIO 1.0 drivers the host application
3692 // should always support the "old" bufferSwitch method, too.
3695 case kAsioSupportsTimeCode:
3696 // Informs the driver whether application is interested in time
3697 // code info. If an application does not need to know about time
3698 // code, the driver has less work to do.
3705 static const char* getAsioErrorString( ASIOError result )
3713 static const Messages m[] =
3715 { ASE_NotPresent, "Hardware input or output is not present or available." },
3716 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3717 { ASE_InvalidParameter, "Invalid input parameter." },
3718 { ASE_InvalidMode, "Invalid mode." },
3719 { ASE_SPNotAdvancing, "Sample position not advancing." },
3720 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3721 { ASE_NoMemory, "Not enough memory to complete the request." }
3724 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3725 if ( m[i].value == result ) return m[i].message;
3727 return "Unknown error.";
3730 //******************** End of __WINDOWS_ASIO__ *********************//
3734 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3736 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3737 // - Introduces support for the Windows WASAPI API
3738 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3739 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3740 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3745 #include <audioclient.h>
3747 #include <mmdeviceapi.h>
3748 #include <functiondiscoverykeys_devpkey.h>
3751 //=============================================================================
3753 #define SAFE_RELEASE( objectPtr )\
3756 objectPtr->Release();\
3760 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3762 //-----------------------------------------------------------------------------
3764 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3765 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3766 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3767 // provide intermediate storage for read / write synchronization.
3781 // sets the length of the internal ring buffer
3782 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3785 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3787 bufferSize_ = bufferSize;
3792 // attempt to push a buffer into the ring buffer at the current "in" index
// Copies bufferSize samples of `format` from the caller's buffer into the ring.
// Returns false (writing nothing) when the block would overtake the unread
// region guarded by outIndex_.  Note: bufferSize and the ring indices count
// samples, not bytes — the memcpy calls below index arrays typed by `format`.
3793 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
// reject NULL, empty, or oversized input up front
3795 if ( !buffer || // incoming buffer is NULL
3796 bufferSize == 0 || // incoming buffer has no data
3797 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the reader index across the ring's wrap point: when the write span
// [inIndex_, inIndexEnd) crosses the end of storage, the reader position must
// be compared in the same "unwrapped" coordinate space (outIndex_ + bufferSize_).
3802 unsigned int relOutIndex = outIndex_;
3803 unsigned int inIndexEnd = inIndex_ + bufferSize;
3804 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3805 relOutIndex += bufferSize_;
3808 // "in" index can end on the "out" index but cannot begin at it
3809 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3810 return false; // not enough space between "in" index and "out" index
3813 // copy buffer from external to internal
// fromZeroSize = samples that wrap past the end of storage and land at index 0;
// fromInSize   = samples written at inIndex_ before the wrap.
3814 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3815 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3816 int fromInSize = bufferSize - fromZeroSize;
// Per-format copy: buffer_ is raw storage, so it is re-indexed at the sample
// width of `format` to get correct element offsets and byte counts.
3821 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3822 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3824 case RTAUDIO_SINT16:
3825 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3826 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3828 case RTAUDIO_SINT24:
3829 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3830 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3832 case RTAUDIO_SINT32:
3833 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3834 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3836 case RTAUDIO_FLOAT32:
3837 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3838 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3840 case RTAUDIO_FLOAT64:
3841 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3842 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3846 // update "in" index
// advance and wrap the writer position only after a successful copy
3847 inIndex_ += bufferSize;
3848 inIndex_ %= bufferSize_;
3853 // attempt to pull a buffer from the ring buffer from the current "out" index
// Mirror of pushBuffer(): copies bufferSize samples of `format` out of the ring
// into the caller's buffer, returning false (reading nothing) when fewer than
// bufferSize unread samples are available.  bufferSize and the ring indices
// count samples of `format`, not bytes.
3854 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
// reject NULL, empty, or oversized requests up front
3856 if ( !buffer || // incoming buffer is NULL
3857 bufferSize == 0 || // incoming buffer has no data
3858 bufferSize > bufferSize_ ) // incoming buffer too large
// Unwrap the writer index across the ring's wrap point so the availability
// test below works when the read span [outIndex_, outIndexEnd) crosses the
// end of storage.
3863 unsigned int relInIndex = inIndex_;
3864 unsigned int outIndexEnd = outIndex_ + bufferSize;
3865 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3866 relInIndex += bufferSize_;
3869 // "out" index can begin at and end on the "in" index
3870 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3871 return false; // not enough space between "out" index and "in" index
3874 // copy buffer from internal to external
// fromZeroSize = samples read from index 0 after wrapping;
// fromOutSize  = samples read at outIndex_ before the wrap.
3875 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3876 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3877 int fromOutSize = bufferSize - fromZeroSize;
// Per-format copy: raw storage re-indexed at the sample width of `format`.
3882 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3883 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3885 case RTAUDIO_SINT16:
3886 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3887 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3889 case RTAUDIO_SINT24:
3890 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3891 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3893 case RTAUDIO_SINT32:
3894 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3895 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3897 case RTAUDIO_FLOAT32:
3898 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3899 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3901 case RTAUDIO_FLOAT64:
3902 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3903 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3907 // update "out" index
// advance and wrap the reader position only after a successful copy
3908 outIndex_ += bufferSize;
3909 outIndex_ %= bufferSize_;
3916 unsigned int bufferSize_;
3917 unsigned int inIndex_;
3918 unsigned int outIndex_;
3921 //-----------------------------------------------------------------------------
3923 // A structure to hold various information related to the WASAPI implementation.
// Bundles the per-stream COM interfaces and the event handles used for
// event-driven buffering.  An instance is stored in stream_.apiHandle
// (see probeDeviceOpen()) and released/closed member-by-member in closeStream().
3926 IAudioClient* captureAudioClient;
3927 IAudioClient* renderAudioClient;
3928 IAudioCaptureClient* captureClient;
3929 IAudioRenderClient* renderClient;
3930 HANDLE captureEvent;
// All members start NULL: the audio clients are filled in by probeDeviceOpen(),
// the capture/render service clients and events lazily by wasapiThread().
3934 : captureAudioClient( NULL ),
3935 renderAudioClient( NULL ),
3936 captureClient( NULL ),
3937 renderClient( NULL ),
3938 captureEvent( NULL ),
3939 renderEvent( NULL ) {}
3942 //=============================================================================
// Constructor: initializes COM for this thread and creates the MMDevice
// enumerator used by all device queries.  coInitialized_ records whether
// CoInitialize() succeeded so the destructor only balances it when owned.
3944 RtApiWasapi::RtApiWasapi()
3945 : coInitialized_( false ), deviceEnumerator_( NULL )
3947 // WASAPI can run either apartment or multi-threaded
3948 HRESULT hr = CoInitialize( NULL );
3949 if ( !FAILED( hr ) )
3950 coInitialized_ = true;
3952 // Instantiate device enumerator
3953 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3954 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3955 ( void** ) &deviceEnumerator_ );
// Without an enumerator no device operation can work; report as driver error.
3957 if ( FAILED( hr ) ) {
3958 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3959 error( RtAudioError::DRIVER_ERROR );
3963 //-----------------------------------------------------------------------------
// Destructor: tears down any open stream, releases the device enumerator,
// and balances the constructor's CoInitialize() call.
3965 RtApiWasapi::~RtApiWasapi()
// ensure stream resources (COM clients, events, buffers) are freed first
3967 if ( stream_.state != STREAM_CLOSED )
3970 SAFE_RELEASE( deviceEnumerator_ );
3972 // If this object previously called CoInitialize()
3973 if ( coInitialized_ )
3977 //=============================================================================
// Returns the number of active WASAPI endpoints: capture count + render count.
// Device indices map render endpoints first, then capture endpoints — see the
// index arithmetic in getDeviceInfo()/probeDeviceOpen().
3979 unsigned int RtApiWasapi::getDeviceCount( void )
3981 unsigned int captureDeviceCount = 0;
3982 unsigned int renderDeviceCount = 0;
3984 IMMDeviceCollection* captureDevices = NULL;
3985 IMMDeviceCollection* renderDevices = NULL;
3987 // Count capture devices
3989 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3990 if ( FAILED( hr ) ) {
3991 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
3995 hr = captureDevices->GetCount( &captureDeviceCount );
3996 if ( FAILED( hr ) ) {
3997 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
4001 // Count render devices
4002 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4003 if ( FAILED( hr ) ) {
4004 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
4008 hr = renderDevices->GetCount( &renderDeviceCount );
4009 if ( FAILED( hr ) ) {
4010 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
4015 // release all references
4016 SAFE_RELEASE( captureDevices );
4017 SAFE_RELEASE( renderDevices );
// On success errorText_ is empty; any failure above falls through here with
// errorText_ set and is reported as a driver error.
4019 if ( errorText_.empty() )
4020 return captureDeviceCount + renderDeviceCount;
4022 error( RtAudioError::DRIVER_ERROR );
4026 //-----------------------------------------------------------------------------
// Probes one endpoint and fills an RtAudio::DeviceInfo: name, default-device
// flags, channel counts, the single native sample rate, and native formats.
// Index mapping: indices [0, renderDeviceCount) are render endpoints; capture
// endpoints follow at [renderDeviceCount, renderDeviceCount+captureDeviceCount).
4028 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
4030 RtAudio::DeviceInfo info;
4031 unsigned int captureDeviceCount = 0;
4032 unsigned int renderDeviceCount = 0;
4033 std::string defaultDeviceName;
4034 bool isCaptureDevice = false;
4036 PROPVARIANT deviceNameProp;
4037 PROPVARIANT defaultDeviceNameProp;
4039 IMMDeviceCollection* captureDevices = NULL;
4040 IMMDeviceCollection* renderDevices = NULL;
4041 IMMDevice* devicePtr = NULL;
4042 IMMDevice* defaultDevicePtr = NULL;
4043 IAudioClient* audioClient = NULL;
4044 IPropertyStore* devicePropStore = NULL;
4045 IPropertyStore* defaultDevicePropStore = NULL;
4047 WAVEFORMATEX* deviceFormat = NULL;
// NOTE(review): closestMatchFormat is never assigned in this function, only
// freed at the end — appears to be a leftover; confirm before removing.
4048 WAVEFORMATEX* closestMatchFormat = NULL;
// probed stays false unless the whole query chain below succeeds
4051 info.probed = false;
4053 // Count capture devices
4055 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4056 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4057 if ( FAILED( hr ) ) {
4058 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4062 hr = captureDevices->GetCount( &captureDeviceCount );
4063 if ( FAILED( hr ) ) {
4064 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4068 // Count render devices
4069 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4070 if ( FAILED( hr ) ) {
4071 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4075 hr = renderDevices->GetCount( &renderDeviceCount );
4076 if ( FAILED( hr ) ) {
4077 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4081 // validate device index
4082 if ( device >= captureDeviceCount + renderDeviceCount ) {
4083 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4084 errorType = RtAudioError::INVALID_USE;
4088 // determine whether index falls within capture or render devices
4089 if ( device >= renderDeviceCount ) {
4090 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4091 if ( FAILED( hr ) ) {
4092 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4095 isCaptureDevice = true;
4098 hr = renderDevices->Item( device, &devicePtr );
4099 if ( FAILED( hr ) ) {
4100 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4103 isCaptureDevice = false;
4106 // get default device name
4107 if ( isCaptureDevice ) {
4108 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4109 if ( FAILED( hr ) ) {
4110 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4115 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4116 if ( FAILED( hr ) ) {
4117 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4122 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4123 if ( FAILED( hr ) ) {
4124 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
// PROPVARIANTs must be initialized before GetValue and cleared at the end
// (paired with the PropVariantClear calls in the cleanup section below).
4127 PropVariantInit( &defaultDeviceNameProp );
4129 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4130 if ( FAILED( hr ) ) {
4131 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4135 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4138 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4139 if ( FAILED( hr ) ) {
4140 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4144 PropVariantInit( &deviceNameProp );
4146 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4147 if ( FAILED( hr ) ) {
4148 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4152 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// NOTE(review): default-device detection compares friendly names, so two
// endpoints with identical names could both be flagged default — confirm
// whether comparing endpoint IDs would be more robust here.
4155 if ( isCaptureDevice ) {
4156 info.isDefaultInput = info.name == defaultDeviceName;
4157 info.isDefaultOutput = false;
4160 info.isDefaultInput = false;
4161 info.isDefaultOutput = info.name == defaultDeviceName;
4165 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4166 if ( FAILED( hr ) ) {
4167 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4171 hr = audioClient->GetMixFormat( &deviceFormat );
4172 if ( FAILED( hr ) ) {
4173 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// An endpoint is strictly one-directional; duplex pairs are synthesized by
// opening one render and one capture stream in probeDeviceOpen().
4177 if ( isCaptureDevice ) {
4178 info.inputChannels = deviceFormat->nChannels;
4179 info.outputChannels = 0;
4180 info.duplexChannels = 0;
4183 info.inputChannels = 0;
4184 info.outputChannels = deviceFormat->nChannels;
4185 info.duplexChannels = 0;
4188 // sample rates (WASAPI only supports the one native sample rate)
4189 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4191 info.sampleRates.clear();
4192 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
// Map the mix format (plain tag or WAVE_FORMAT_EXTENSIBLE SubFormat) onto the
// RtAudio native-format bitmask by float/PCM family and bit depth.
4195 info.nativeFormats = 0;
4197 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4198 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4199 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4201 if ( deviceFormat->wBitsPerSample == 32 ) {
4202 info.nativeFormats |= RTAUDIO_FLOAT32;
4204 else if ( deviceFormat->wBitsPerSample == 64 ) {
4205 info.nativeFormats |= RTAUDIO_FLOAT64;
4208 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4209 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4210 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4212 if ( deviceFormat->wBitsPerSample == 8 ) {
4213 info.nativeFormats |= RTAUDIO_SINT8;
4215 else if ( deviceFormat->wBitsPerSample == 16 ) {
4216 info.nativeFormats |= RTAUDIO_SINT16;
4218 else if ( deviceFormat->wBitsPerSample == 24 ) {
4219 info.nativeFormats |= RTAUDIO_SINT24;
4221 else if ( deviceFormat->wBitsPerSample == 32 ) {
4222 info.nativeFormats |= RTAUDIO_SINT32;
4230 // release all references
4231 PropVariantClear( &deviceNameProp );
4232 PropVariantClear( &defaultDeviceNameProp );
4234 SAFE_RELEASE( captureDevices );
4235 SAFE_RELEASE( renderDevices );
4236 SAFE_RELEASE( devicePtr );
4237 SAFE_RELEASE( defaultDevicePtr );
4238 SAFE_RELEASE( audioClient );
4239 SAFE_RELEASE( devicePropStore );
4240 SAFE_RELEASE( defaultDevicePropStore );
4242 CoTaskMemFree( deviceFormat );
4243 CoTaskMemFree( closestMatchFormat );
// Any failure above jumps here with errorText_ set and is reported.
4245 if ( !errorText_.empty() )
4250 //-----------------------------------------------------------------------------
4252 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4254 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4255 if ( getDeviceInfo( i ).isDefaultOutput ) {
4263 //-----------------------------------------------------------------------------
4265 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4267 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4268 if ( getDeviceInfo( i ).isDefaultInput ) {
4276 //-----------------------------------------------------------------------------
// Closes the open stream: stops it if needed, releases all WASAPI COM
// interfaces and event handles held in the WasapiHandle, frees user/device
// buffers, and marks the stream CLOSED.
4278 void RtApiWasapi::closeStream( void )
4280 if ( stream_.state == STREAM_CLOSED ) {
4281 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4282 error( RtAudioError::WARNING );
// a running stream must be stopped before its resources can be torn down
4286 if ( stream_.state != STREAM_STOPPED )
4289 // clean up stream memory
4290 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4291 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4293 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4294 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// event handles are Win32 handles, not COM objects — CloseHandle, not Release
4296 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4297 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4299 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4300 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4302 delete ( WasapiHandle* ) stream_.apiHandle;
4303 stream_.apiHandle = NULL;
// free the per-direction user buffers (index 0 = output, 1 = input)
4305 for ( int i = 0; i < 2; i++ ) {
4306 if ( stream_.userBuffer[i] ) {
4307 free( stream_.userBuffer[i] );
4308 stream_.userBuffer[i] = 0;
4312 if ( stream_.deviceBuffer ) {
4313 free( stream_.deviceBuffer );
4314 stream_.deviceBuffer = 0;
4317 // update stream state
4318 stream_.state = STREAM_CLOSED;
4321 //-----------------------------------------------------------------------------
// Starts the stream by spawning the WASAPI processing thread (wasapiThread()
// via the runWasapiThread trampoline).
4323 void RtApiWasapi::startStream( void )
4327 if ( stream_.state == STREAM_RUNNING ) {
4328 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4329 error( RtAudioError::WARNING );
4333 // update stream state
// set RUNNING before the thread starts so wasapiThread() sees the new state
4334 stream_.state = STREAM_RUNNING;
4336 // create WASAPI stream thread
// created suspended so the priority can be applied before the thread runs
4337 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4339 if ( !stream_.callbackInfo.thread ) {
4340 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4341 error( RtAudioError::THREAD_ERROR );
// priority was chosen in probeDeviceOpen() (15 when RTAUDIO_SCHEDULE_REALTIME)
4344 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4345 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4349 //-----------------------------------------------------------------------------
// Stops the stream after letting queued audio drain: signals the processing
// thread via STREAM_STOPPING, waits for it to exit, stops both audio clients,
// and closes the thread handle.
4351 void RtApiWasapi::stopStream( void )
4355 if ( stream_.state == STREAM_STOPPED ) {
4356 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4357 error( RtAudioError::WARNING );
4361 // inform stream thread by setting stream state to STREAM_STOPPING
4362 stream_.state = STREAM_STOPPING;
4364 // wait until stream thread is stopped
// NOTE(review): spin-wait on a plain (non-atomic) field written by the stream
// thread — relies on cross-thread visibility of stream_.state; confirm.
4365 while( stream_.state != STREAM_STOPPED ) {
4369 // Wait for the last buffer to play before stopping.
// one buffer's worth of milliseconds at the stream sample rate
4370 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4372 // stop capture client if applicable
4373 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4374 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4375 if ( FAILED( hr ) ) {
4376 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4377 error( RtAudioError::DRIVER_ERROR );
4382 // stop render client if applicable
4383 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4384 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4385 if ( FAILED( hr ) ) {
4386 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4387 error( RtAudioError::DRIVER_ERROR );
4392 // close thread handle
4393 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4394 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4395 error( RtAudioError::THREAD_ERROR );
4399 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4402 //-----------------------------------------------------------------------------
// Aborts the stream immediately: identical to stopStream() except it does NOT
// sleep to let the final buffer play out before stopping the audio clients.
4404 void RtApiWasapi::abortStream( void )
4408 if ( stream_.state == STREAM_STOPPED ) {
4409 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4410 error( RtAudioError::WARNING );
4414 // inform stream thread by setting stream state to STREAM_STOPPING
4415 stream_.state = STREAM_STOPPING;
4417 // wait until stream thread is stopped
// NOTE(review): spin-wait on a plain (non-atomic) field written by the stream
// thread — relies on cross-thread visibility of stream_.state; confirm.
4418 while ( stream_.state != STREAM_STOPPED ) {
4422 // stop capture client if applicable
4423 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4424 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4425 if ( FAILED( hr ) ) {
4426 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4427 error( RtAudioError::DRIVER_ERROR );
4432 // stop render client if applicable
4433 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4434 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4435 if ( FAILED( hr ) ) {
4436 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4437 error( RtAudioError::DRIVER_ERROR );
4442 // close thread handle
4443 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4444 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4445 error( RtAudioError::THREAD_ERROR );
4449 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4452 //-----------------------------------------------------------------------------
// Opens one direction (INPUT or OUTPUT) of a stream on the given device:
// creates/activates the IAudioClient, validates the sample rate against the
// device's single native rate, fills in stream_ bookkeeping, and allocates the
// user buffer.  Returns SUCCESS/FAILURE; on failure the stream is closed and
// the error reported.  The interface (parameters, return) matches the other
// RtApi backends.
4454 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4455 unsigned int firstChannel, unsigned int sampleRate,
4456 RtAudioFormat format, unsigned int* bufferSize,
4457 RtAudio::StreamOptions* options )
4459 bool methodResult = FAILURE;
4460 unsigned int captureDeviceCount = 0;
4461 unsigned int renderDeviceCount = 0;
4463 IMMDeviceCollection* captureDevices = NULL;
4464 IMMDeviceCollection* renderDevices = NULL;
4465 IMMDevice* devicePtr = NULL;
4466 WAVEFORMATEX* deviceFormat = NULL;
4467 unsigned int bufferBytes;
4468 stream_.state = STREAM_STOPPED;
4469 RtAudio::DeviceInfo deviceInfo;
4471 // create API Handle if not already created
// reused for the second call of a duplex open, so only allocate once
4472 if ( !stream_.apiHandle )
4473 stream_.apiHandle = ( void* ) new WasapiHandle();
4475 // Count capture devices
4477 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4478 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4479 if ( FAILED( hr ) ) {
4480 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4484 hr = captureDevices->GetCount( &captureDeviceCount );
4485 if ( FAILED( hr ) ) {
4486 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4490 // Count render devices
4491 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4492 if ( FAILED( hr ) ) {
4493 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4497 hr = renderDevices->GetCount( &renderDeviceCount );
4498 if ( FAILED( hr ) ) {
4499 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4503 // validate device index
4504 if ( device >= captureDeviceCount + renderDeviceCount ) {
4505 errorType = RtAudioError::INVALID_USE;
4506 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4510 deviceInfo = getDeviceInfo( device );
4512 // validate sample rate
// this backend does not resample here: the caller must use the device's
// one native (mix-format) rate reported by getDeviceInfo()
4513 if ( sampleRate != deviceInfo.preferredSampleRate )
4515 errorType = RtAudioError::INVALID_USE;
4516 std::stringstream ss;
4517 ss << "RtApiWasapi::probeDeviceOpen: " << sampleRate
4518 << "Hz sample rate not supported. This device only supports "
4519 << deviceInfo.preferredSampleRate << "Hz.";
4520 errorText_ = ss.str();
4524 // determine whether index falls within capture or render devices
// indices >= renderDeviceCount are capture endpoints (render listed first)
4525 if ( device >= renderDeviceCount ) {
4526 if ( mode != INPUT ) {
4527 errorType = RtAudioError::INVALID_USE;
4528 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4532 // retrieve captureAudioClient from devicePtr
4533 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4535 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4536 if ( FAILED( hr ) ) {
4537 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4541 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4542 NULL, ( void** ) &captureAudioClient );
4543 if ( FAILED( hr ) ) {
4544 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4548 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4549 if ( FAILED( hr ) ) {
4550 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4554 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// NOTE(review): casting &stream_.latency[mode] to long long* writes 8 bytes
// into that slot — verify stream_.latency's element width matches; a
// temporary REFERENCE_TIME would be safer.
4555 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4558 if ( mode != OUTPUT ) {
4559 errorType = RtAudioError::INVALID_USE;
4560 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4564 // retrieve renderAudioClient from devicePtr
4565 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4567 hr = renderDevices->Item( device, &devicePtr );
4568 if ( FAILED( hr ) ) {
4569 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4573 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4574 NULL, ( void** ) &renderAudioClient );
4575 if ( FAILED( hr ) ) {
4576 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4580 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4581 if ( FAILED( hr ) ) {
4582 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4586 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
4587 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// a second open in the opposite direction promotes the stream to DUPLEX
4591 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4592 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4593 stream_.mode = DUPLEX;
4596 stream_.mode = mode;
4599 stream_.device[mode] = device;
4600 stream_.doByteSwap[mode] = false;
4601 stream_.sampleRate = sampleRate;
4602 stream_.bufferSize = *bufferSize;
4603 stream_.nBuffers = 1;
4604 stream_.nUserChannels[mode] = channels;
4605 stream_.channelOffset[mode] = firstChannel;
4606 stream_.userFormat = format;
4607 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4609 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4610 stream_.userInterleaved = false;
4612 stream_.userInterleaved = true;
4613 stream_.deviceInterleaved[mode] = true;
4615 // Set flags for buffer conversion.
4616 stream_.doConvertBuffer[mode] = false;
// NOTE(review): `stream_.nUserChannels != stream_.nDeviceChannels` compares
// the two arrays' addresses (always unequal), so this condition is always
// true and conversion is always enabled — the per-[mode] element comparison
// was likely intended; confirm against upstream.
4617 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4618 stream_.nUserChannels != stream_.nDeviceChannels )
4619 stream_.doConvertBuffer[mode] = true;
4620 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4621 stream_.nUserChannels[mode] > 1 )
4622 stream_.doConvertBuffer[mode] = true;
4624 if ( stream_.doConvertBuffer[mode] )
4625 setConvertInfo( mode, 0 );
4627 // Allocate necessary internal buffers
4628 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
4630 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4631 if ( !stream_.userBuffer[mode] ) {
4632 errorType = RtAudioError::MEMORY_ERROR;
4633 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
// thread priority is applied later in startStream() via SetThreadPriority()
4637 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4638 stream_.callbackInfo.priority = 15;
4640 stream_.callbackInfo.priority = 0;
4642 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4643 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4645 methodResult = SUCCESS;
// common cleanup path: release enumeration references in all cases
4649 SAFE_RELEASE( captureDevices );
4650 SAFE_RELEASE( renderDevices );
4651 SAFE_RELEASE( devicePtr );
4652 CoTaskMemFree( deviceFormat );
4654 // if method failed, close the stream
4655 if ( methodResult == FAILURE )
4658 if ( !errorText_.empty() )
4660 return methodResult;
4663 //=============================================================================
4665 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4668 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4673 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4676 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4681 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4684 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4689 //-----------------------------------------------------------------------------
4691 void RtApiWasapi::wasapiThread()
4693 // as this is a new thread, we must CoInitialize it
4694 CoInitialize( NULL );
4698 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4699 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4700 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4701 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4702 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4703 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4705 WAVEFORMATEX* captureFormat = NULL;
4706 WAVEFORMATEX* renderFormat = NULL;
4707 WasapiBuffer captureBuffer;
4708 WasapiBuffer renderBuffer;
4710 // declare local stream variables
4711 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4712 BYTE* streamBuffer = NULL;
4713 unsigned long captureFlags = 0;
4714 unsigned int bufferFrameCount = 0;
4715 unsigned int numFramesPadding = 0;
4716 bool callbackPushed = false;
4717 bool callbackPulled = false;
4718 bool callbackStopped = false;
4719 int callbackResult = 0;
4721 unsigned int deviceBuffSize = 0;
4724 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4726 // Attempt to assign "Pro Audio" characteristic to thread
4727 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4729 DWORD taskIndex = 0;
4730 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4731 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4732 FreeLibrary( AvrtDll );
4735 // start capture stream if applicable
4736 if ( captureAudioClient ) {
4737 hr = captureAudioClient->GetMixFormat( &captureFormat );
4738 if ( FAILED( hr ) ) {
4739 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4743 // initialize capture stream according to desired buffer size
4744 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4746 if ( !captureClient ) {
4747 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4748 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4749 desiredBufferPeriod,
4750 desiredBufferPeriod,
4753 if ( FAILED( hr ) ) {
4754 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4758 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4759 ( void** ) &captureClient );
4760 if ( FAILED( hr ) ) {
4761 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4765 // configure captureEvent to trigger on every available capture buffer
4766 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4767 if ( !captureEvent ) {
4768 errorType = RtAudioError::SYSTEM_ERROR;
4769 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4773 hr = captureAudioClient->SetEventHandle( captureEvent );
4774 if ( FAILED( hr ) ) {
4775 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4779 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4780 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4783 unsigned int inBufferSize = 0;
4784 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4785 if ( FAILED( hr ) ) {
4786 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4790 // scale outBufferSize according to stream->user sample rate ratio
4791 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4792 inBufferSize *= stream_.nDeviceChannels[INPUT];
4794 // set captureBuffer size
4795 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4797 // reset the capture stream
4798 hr = captureAudioClient->Reset();
4799 if ( FAILED( hr ) ) {
4800 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4804 // start the capture stream
4805 hr = captureAudioClient->Start();
4806 if ( FAILED( hr ) ) {
4807 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4812 // start render stream if applicable
4813 if ( renderAudioClient ) {
4814 hr = renderAudioClient->GetMixFormat( &renderFormat );
4815 if ( FAILED( hr ) ) {
4816 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4820 // initialize render stream according to desired buffer size
4821 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4823 if ( !renderClient ) {
4824 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4825 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4826 desiredBufferPeriod,
4827 desiredBufferPeriod,
4830 if ( FAILED( hr ) ) {
4831 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4835 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4836 ( void** ) &renderClient );
4837 if ( FAILED( hr ) ) {
4838 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4842 // configure renderEvent to trigger on every available render buffer
4843 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4844 if ( !renderEvent ) {
4845 errorType = RtAudioError::SYSTEM_ERROR;
4846 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4850 hr = renderAudioClient->SetEventHandle( renderEvent );
4851 if ( FAILED( hr ) ) {
4852 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4856 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4857 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4860 unsigned int outBufferSize = 0;
4861 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4862 if ( FAILED( hr ) ) {
4863 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4867 // scale inBufferSize according to user->stream sample rate ratio
4868 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4869 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4871 // set renderBuffer size
4872 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4874 // reset the render stream
4875 hr = renderAudioClient->Reset();
4876 if ( FAILED( hr ) ) {
4877 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4881 // start the render stream
4882 hr = renderAudioClient->Start();
4883 if ( FAILED( hr ) ) {
4884 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4889 if ( stream_.mode == INPUT ) {
4890 using namespace std; // for roundf
4891 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4893 else if ( stream_.mode == OUTPUT ) {
4894 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4896 else if ( stream_.mode == DUPLEX ) {
4897 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4898 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4901 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4902 if ( !stream_.deviceBuffer ) {
4903 errorType = RtAudioError::MEMORY_ERROR;
4904 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4908 // stream process loop
4909 while ( stream_.state != STREAM_STOPPING ) {
4910 if ( !callbackPulled ) {
4913 // 1. Pull callback buffer from inputBuffer
4914 // 2. If 1. was successful: Convert callback buffer to user format
4916 if ( captureAudioClient ) {
4917 // Pull callback buffer from inputBuffer
4918 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4919 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4920 stream_.deviceFormat[INPUT] );
4922 if ( callbackPulled ) {
4923 if ( stream_.doConvertBuffer[INPUT] ) {
4924 // Convert callback buffer to user format
4925 convertBuffer( stream_.userBuffer[INPUT],
4926 stream_.deviceBuffer,
4927 stream_.convertInfo[INPUT] );
4930 // no further conversion, simple copy deviceBuffer to userBuffer
4931 memcpy( stream_.userBuffer[INPUT],
4932 stream_.deviceBuffer,
4933 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4938 // if there is no capture stream, set callbackPulled flag
4939 callbackPulled = true;
4944 // 1. Execute user callback method
4945 // 2. Handle return value from callback
4947 // if callback has not requested the stream to stop
4948 if ( callbackPulled && !callbackStopped ) {
4949 // Execute user callback method
4950 callbackResult = callback( stream_.userBuffer[OUTPUT],
4951 stream_.userBuffer[INPUT],
4954 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4955 stream_.callbackInfo.userData );
4957 // Handle return value from callback
4958 if ( callbackResult == 1 ) {
4959 // instantiate a thread to stop this thread
4960 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4961 if ( !threadHandle ) {
4962 errorType = RtAudioError::THREAD_ERROR;
4963 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4966 else if ( !CloseHandle( threadHandle ) ) {
4967 errorType = RtAudioError::THREAD_ERROR;
4968 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4972 callbackStopped = true;
4974 else if ( callbackResult == 2 ) {
4975 // instantiate a thread to stop this thread
4976 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4977 if ( !threadHandle ) {
4978 errorType = RtAudioError::THREAD_ERROR;
4979 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4982 else if ( !CloseHandle( threadHandle ) ) {
4983 errorType = RtAudioError::THREAD_ERROR;
4984 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4988 callbackStopped = true;
4995 // 1. Convert callback buffer to stream format
4996 // 2. Push callback buffer into outputBuffer
4998 if ( renderAudioClient && callbackPulled ) {
4999 if ( stream_.doConvertBuffer[OUTPUT] ) {
5000 // Convert callback buffer to stream format
5001 convertBuffer( stream_.deviceBuffer,
5002 stream_.userBuffer[OUTPUT],
5003 stream_.convertInfo[OUTPUT] );
5007 // Push callback buffer into outputBuffer
5008 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
5009 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
5010 stream_.deviceFormat[OUTPUT] );
5013 // if there is no render stream, set callbackPushed flag
5014 callbackPushed = true;
5019 // 1. Get capture buffer from stream
5020 // 2. Push capture buffer into inputBuffer
5021 // 3. If 2. was successful: Release capture buffer
5023 if ( captureAudioClient ) {
5024 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
5025 if ( !callbackPulled ) {
5026 WaitForSingleObject( captureEvent, INFINITE );
5029 // Get capture buffer from stream
5030 hr = captureClient->GetBuffer( &streamBuffer,
5032 &captureFlags, NULL, NULL );
5033 if ( FAILED( hr ) ) {
5034 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
5038 if ( bufferFrameCount != 0 ) {
5039 // Push capture buffer into inputBuffer
5040 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
5041 bufferFrameCount * stream_.nDeviceChannels[INPUT],
5042 stream_.deviceFormat[INPUT] ) )
5044 // Release capture buffer
5045 hr = captureClient->ReleaseBuffer( bufferFrameCount );
5046 if ( FAILED( hr ) ) {
5047 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5053 // Inform WASAPI that capture was unsuccessful
5054 hr = captureClient->ReleaseBuffer( 0 );
5055 if ( FAILED( hr ) ) {
5056 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5063 // Inform WASAPI that capture was unsuccessful
5064 hr = captureClient->ReleaseBuffer( 0 );
5065 if ( FAILED( hr ) ) {
5066 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5074 // 1. Get render buffer from stream
5075 // 2. Pull next buffer from outputBuffer
5076 // 3. If 2. was successful: Fill render buffer with next buffer
5077 // Release render buffer
5079 if ( renderAudioClient ) {
5080 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5081 if ( callbackPulled && !callbackPushed ) {
5082 WaitForSingleObject( renderEvent, INFINITE );
5085 // Get render buffer from stream
5086 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5087 if ( FAILED( hr ) ) {
5088 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5092 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5093 if ( FAILED( hr ) ) {
5094 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5098 bufferFrameCount -= numFramesPadding;
5100 if ( bufferFrameCount != 0 ) {
5101 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5102 if ( FAILED( hr ) ) {
5103 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5107 // Pull next buffer from outputBuffer
5108 // Fill render buffer with next buffer
5109 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5110 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5111 stream_.deviceFormat[OUTPUT] ) )
5113 // Release render buffer
5114 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5115 if ( FAILED( hr ) ) {
5116 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5122 // Inform WASAPI that render was unsuccessful
5123 hr = renderClient->ReleaseBuffer( 0, 0 );
5124 if ( FAILED( hr ) ) {
5125 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5132 // Inform WASAPI that render was unsuccessful
5133 hr = renderClient->ReleaseBuffer( 0, 0 );
5134 if ( FAILED( hr ) ) {
5135 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5141 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
5142 if ( callbackPushed ) {
5143 callbackPulled = false;
5145 RtApi::tickStreamTime();
5152 CoTaskMemFree( captureFormat );
5153 CoTaskMemFree( renderFormat );
5157 // update stream state
5158 stream_.state = STREAM_STOPPED;
5160 if ( errorText_.empty() )
5166 //******************** End of __WINDOWS_WASAPI__ *********************//
5170 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5172 // Modified by Robin Davies, October 2005
5173 // - Improvements to DirectX pointer chasing.
5174 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5175 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5176 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5177 // Changed device query structure for RtAudio 4.0.7, January 2010
5179 #include <windows.h>
5180 #include <process.h>
5181 #include <mmsystem.h>
5185 #include <algorithm>
5187 #if defined(__MINGW32__)
5188 // missing from latest mingw winapi
5189 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5190 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5191 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5192 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5195 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5197 #ifdef _MSC_VER // if Microsoft Visual C++
5198 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5201 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5203 if ( pointer > bufferSize ) pointer -= bufferSize;
5204 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5205 if ( pointer < earlierPointer ) pointer += bufferSize;
5206 return pointer >= earlierPointer && pointer < laterPointer;
5209 // A structure to hold various information related to the DirectSound
5210 // API implementation.
5212 unsigned int drainCounter; // Tracks callback counts when draining
5213 bool internalDrain; // Indicates if stop is initiated from callback or not.
5217 UINT bufferPointer[2];
5218 DWORD dsBufferSize[2];
5219 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
5223 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5226 // Declarations for utility functions, callbacks, and structures
5227 // specific to the DirectSound implementation.
5228 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5229 LPCTSTR description,
5233 static const char* getErrorString( int code );
5235 static unsigned __stdcall callbackHandler( void *ptr );
5244 : found(false) { validId[0] = false; validId[1] = false; }
// Context passed to the enumeration callback: which direction is being
// probed and the device list to populate.
struct DsProbeData {
  bool isInput;                            // true when probing capture devices
  std::vector<struct DsDevice>* dsDevices; // device list updated during enumeration
};
5252 RtApiDs :: RtApiDs()
5254 // Dsound will run both-threaded. If CoInitialize fails, then just
5255 // accept whatever the mainline chose for a threading model.
5256 coInitialized_ = false;
5257 HRESULT hr = CoInitialize( NULL );
5258 if ( !FAILED( hr ) ) coInitialized_ = true;
5261 RtApiDs :: ~RtApiDs()
5263 if ( stream_.state != STREAM_CLOSED ) closeStream();
5264 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5267 // The DirectSound default output is always the first device.
5268 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5273 // The DirectSound default input is always the first input device,
5274 // which is the first capture device enumerated.
5275 unsigned int RtApiDs :: getDefaultInputDevice( void )
5280 unsigned int RtApiDs :: getDeviceCount( void )
5282 // Set query flag for previously found devices to false, so that we
5283 // can check for any devices that have disappeared.
5284 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5285 dsDevices[i].found = false;
5287 // Query DirectSound devices.
5288 struct DsProbeData probeInfo;
5289 probeInfo.isInput = false;
5290 probeInfo.dsDevices = &dsDevices;
5291 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5292 if ( FAILED( result ) ) {
5293 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5294 errorText_ = errorStream_.str();
5295 error( RtAudioError::WARNING );
5298 // Query DirectSoundCapture devices.
5299 probeInfo.isInput = true;
5300 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5301 if ( FAILED( result ) ) {
5302 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5303 errorText_ = errorStream_.str();
5304 error( RtAudioError::WARNING );
5307 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5308 for ( unsigned int i=0; i<dsDevices.size(); ) {
5309 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5313 return static_cast<unsigned int>(dsDevices.size());
5316 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
5318 RtAudio::DeviceInfo info;
5319 info.probed = false;
5321 if ( dsDevices.size() == 0 ) {
5322 // Force a query of all devices
5324 if ( dsDevices.size() == 0 ) {
5325 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5326 error( RtAudioError::INVALID_USE );
5331 if ( device >= dsDevices.size() ) {
5332 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5333 error( RtAudioError::INVALID_USE );
5338 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5340 LPDIRECTSOUND output;
5342 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5343 if ( FAILED( result ) ) {
5344 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5345 errorText_ = errorStream_.str();
5346 error( RtAudioError::WARNING );
5350 outCaps.dwSize = sizeof( outCaps );
5351 result = output->GetCaps( &outCaps );
5352 if ( FAILED( result ) ) {
5354 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5355 errorText_ = errorStream_.str();
5356 error( RtAudioError::WARNING );
5360 // Get output channel information.
5361 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5363 // Get sample rate information.
5364 info.sampleRates.clear();
5365 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5366 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5367 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5368 info.sampleRates.push_back( SAMPLE_RATES[k] );
5370 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5371 info.preferredSampleRate = SAMPLE_RATES[k];
5375 // Get format information.
5376 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5377 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5381 if ( getDefaultOutputDevice() == device )
5382 info.isDefaultOutput = true;
5384 if ( dsDevices[ device ].validId[1] == false ) {
5385 info.name = dsDevices[ device ].name;
5392 LPDIRECTSOUNDCAPTURE input;
5393 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5394 if ( FAILED( result ) ) {
5395 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5396 errorText_ = errorStream_.str();
5397 error( RtAudioError::WARNING );
5402 inCaps.dwSize = sizeof( inCaps );
5403 result = input->GetCaps( &inCaps );
5404 if ( FAILED( result ) ) {
5406 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5407 errorText_ = errorStream_.str();
5408 error( RtAudioError::WARNING );
5412 // Get input channel information.
5413 info.inputChannels = inCaps.dwChannels;
5415 // Get sample rate and format information.
5416 std::vector<unsigned int> rates;
5417 if ( inCaps.dwChannels >= 2 ) {
5418 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5419 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5420 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5421 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5422 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5423 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5424 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5425 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5427 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5428 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5429 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5430 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5431 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5433 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5434 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5435 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5436 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5437 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
5440 else if ( inCaps.dwChannels == 1 ) {
5441 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5442 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5443 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5444 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5445 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5446 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5447 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5448 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5450 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5451 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5452 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5453 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5454 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5456 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5457 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5458 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5459 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5460 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5463 else info.inputChannels = 0; // technically, this would be an error
5467 if ( info.inputChannels == 0 ) return info;
5469 // Copy the supported rates to the info structure but avoid duplication.
5471 for ( unsigned int i=0; i<rates.size(); i++ ) {
5473 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5474 if ( rates[i] == info.sampleRates[j] ) {
5479 if ( found == false ) info.sampleRates.push_back( rates[i] );
5481 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5483 // If device opens for both playback and capture, we determine the channels.
5484 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5485 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
5487 if ( device == 0 ) info.isDefaultInput = true;
5489 // Copy name and return.
5490 info.name = dsDevices[ device ].name;
5495 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5496 unsigned int firstChannel, unsigned int sampleRate,
5497 RtAudioFormat format, unsigned int *bufferSize,
5498 RtAudio::StreamOptions *options )
5500 if ( channels + firstChannel > 2 ) {
5501 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5505 size_t nDevices = dsDevices.size();
5506 if ( nDevices == 0 ) {
5507 // This should not happen because a check is made before this function is called.
5508 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5512 if ( device >= nDevices ) {
5513 // This should not happen because a check is made before this function is called.
5514 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5518 if ( mode == OUTPUT ) {
5519 if ( dsDevices[ device ].validId[0] == false ) {
5520 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5521 errorText_ = errorStream_.str();
5525 else { // mode == INPUT
5526 if ( dsDevices[ device ].validId[1] == false ) {
5527 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5528 errorText_ = errorStream_.str();
5533 // According to a note in PortAudio, using GetDesktopWindow()
5534 // instead of GetForegroundWindow() is supposed to avoid problems
5535 // that occur when the application's window is not the foreground
5536 // window. Also, if the application window closes before the
5537 // DirectSound buffer, DirectSound can crash. In the past, I had
5538 // problems when using GetDesktopWindow() but it seems fine now
5539 // (January 2010). I'll leave it commented here.
5540 // HWND hWnd = GetForegroundWindow();
5541 HWND hWnd = GetDesktopWindow();
5543 // Check the numberOfBuffers parameter and limit the lowest value to
5544 // two. This is a judgement call and a value of two is probably too
5545 // low for capture, but it should work for playback.
5547 if ( options ) nBuffers = options->numberOfBuffers;
5548 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5549 if ( nBuffers < 2 ) nBuffers = 3;
5551 // Check the lower range of the user-specified buffer size and set
5552 // (arbitrarily) to a lower bound of 32.
5553 if ( *bufferSize < 32 ) *bufferSize = 32;
5555 // Create the wave format structure. The data format setting will
5556 // be determined later.
5557 WAVEFORMATEX waveFormat;
5558 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5559 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5560 waveFormat.nChannels = channels + firstChannel;
5561 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5563 // Determine the device buffer size. By default, we'll use the value
5564 // defined above (32K), but we will grow it to make allowances for
5565 // very large software buffer sizes.
5566 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5567 DWORD dsPointerLeadTime = 0;
5569 void *ohandle = 0, *bhandle = 0;
5571 if ( mode == OUTPUT ) {
5573 LPDIRECTSOUND output;
5574 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5575 if ( FAILED( result ) ) {
5576 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5577 errorText_ = errorStream_.str();
5582 outCaps.dwSize = sizeof( outCaps );
5583 result = output->GetCaps( &outCaps );
5584 if ( FAILED( result ) ) {
5586 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5587 errorText_ = errorStream_.str();
5591 // Check channel information.
5592 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5593 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5594 errorText_ = errorStream_.str();
5598 // Check format information. Use 16-bit format unless not
5599 // supported or user requests 8-bit.
5600 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5601 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5602 waveFormat.wBitsPerSample = 16;
5603 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5606 waveFormat.wBitsPerSample = 8;
5607 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5609 stream_.userFormat = format;
5611 // Update wave format structure and buffer information.
5612 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5613 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5614 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5616 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5617 while ( dsPointerLeadTime * 2U > dsBufferSize )
5620 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5621 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5622 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5623 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5624 if ( FAILED( result ) ) {
5626 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5627 errorText_ = errorStream_.str();
5631 // Even though we will write to the secondary buffer, we need to
5632 // access the primary buffer to set the correct output format
5633 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5634 // buffer description.
5635 DSBUFFERDESC bufferDescription;
5636 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5637 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5638 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5640 // Obtain the primary buffer
5641 LPDIRECTSOUNDBUFFER buffer;
5642 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5643 if ( FAILED( result ) ) {
5645 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5646 errorText_ = errorStream_.str();
5650 // Set the primary DS buffer sound format.
5651 result = buffer->SetFormat( &waveFormat );
5652 if ( FAILED( result ) ) {
5654 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5655 errorText_ = errorStream_.str();
5659 // Setup the secondary DS buffer description.
5660 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5661 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5662 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5663 DSBCAPS_GLOBALFOCUS |
5664 DSBCAPS_GETCURRENTPOSITION2 |
5665 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5666 bufferDescription.dwBufferBytes = dsBufferSize;
5667 bufferDescription.lpwfxFormat = &waveFormat;
5669 // Try to create the secondary DS buffer. If that doesn't work,
5670 // try to use software mixing. Otherwise, there's a problem.
5671 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5672 if ( FAILED( result ) ) {
5673 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5674 DSBCAPS_GLOBALFOCUS |
5675 DSBCAPS_GETCURRENTPOSITION2 |
5676 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5677 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5678 if ( FAILED( result ) ) {
5680 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5681 errorText_ = errorStream_.str();
5686 // Get the buffer size ... might be different from what we specified.
5688 dsbcaps.dwSize = sizeof( DSBCAPS );
5689 result = buffer->GetCaps( &dsbcaps );
5690 if ( FAILED( result ) ) {
5693 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5694 errorText_ = errorStream_.str();
5698 dsBufferSize = dsbcaps.dwBufferBytes;
5700 // Lock the DS buffer
5703 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5704 if ( FAILED( result ) ) {
5707 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5708 errorText_ = errorStream_.str();
5712 // Zero the DS buffer
5713 ZeroMemory( audioPtr, dataLen );
5715 // Unlock the DS buffer
5716 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5717 if ( FAILED( result ) ) {
5720 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5721 errorText_ = errorStream_.str();
5725 ohandle = (void *) output;
5726 bhandle = (void *) buffer;
5729 if ( mode == INPUT ) {
5731 LPDIRECTSOUNDCAPTURE input;
5732 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5733 if ( FAILED( result ) ) {
5734 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5735 errorText_ = errorStream_.str();
5740 inCaps.dwSize = sizeof( inCaps );
5741 result = input->GetCaps( &inCaps );
5742 if ( FAILED( result ) ) {
5744 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5745 errorText_ = errorStream_.str();
5749 // Check channel information.
5750 if ( inCaps.dwChannels < channels + firstChannel ) {
5751 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5755 // Check format information. Use 16-bit format unless user
5757 DWORD deviceFormats;
5758 if ( channels + firstChannel == 2 ) {
5759 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5760 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5761 waveFormat.wBitsPerSample = 8;
5762 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5764 else { // assume 16-bit is supported
5765 waveFormat.wBitsPerSample = 16;
5766 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5769 else { // channel == 1
5770 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5771 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5772 waveFormat.wBitsPerSample = 8;
5773 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5775 else { // assume 16-bit is supported
5776 waveFormat.wBitsPerSample = 16;
5777 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5780 stream_.userFormat = format;
5782 // Update wave format structure and buffer information.
5783 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5784 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5785 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5787 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5788 while ( dsPointerLeadTime * 2U > dsBufferSize )
5791 // Setup the secondary DS buffer description.
5792 DSCBUFFERDESC bufferDescription;
5793 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5794 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5795 bufferDescription.dwFlags = 0;
5796 bufferDescription.dwReserved = 0;
5797 bufferDescription.dwBufferBytes = dsBufferSize;
5798 bufferDescription.lpwfxFormat = &waveFormat;
5800 // Create the capture buffer.
5801 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5802 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5803 if ( FAILED( result ) ) {
5805 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5806 errorText_ = errorStream_.str();
5810 // Get the buffer size ... might be different from what we specified.
5812 dscbcaps.dwSize = sizeof( DSCBCAPS );
5813 result = buffer->GetCaps( &dscbcaps );
5814 if ( FAILED( result ) ) {
5817 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5818 errorText_ = errorStream_.str();
5822 dsBufferSize = dscbcaps.dwBufferBytes;
5824 // NOTE: We could have a problem here if this is a duplex stream
5825 // and the play and capture hardware buffer sizes are different
5826 // (I'm actually not sure if that is a problem or not).
5827 // Currently, we are not verifying that.
5829 // Lock the capture buffer
5832 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5833 if ( FAILED( result ) ) {
5836 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5837 errorText_ = errorStream_.str();
5842 ZeroMemory( audioPtr, dataLen );
5844 // Unlock the buffer
5845 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5846 if ( FAILED( result ) ) {
5849 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5850 errorText_ = errorStream_.str();
5854 ohandle = (void *) input;
5855 bhandle = (void *) buffer;
5858 // Set various stream parameters
5859 DsHandle *handle = 0;
5860 stream_.nDeviceChannels[mode] = channels + firstChannel;
5861 stream_.nUserChannels[mode] = channels;
5862 stream_.bufferSize = *bufferSize;
5863 stream_.channelOffset[mode] = firstChannel;
5864 stream_.deviceInterleaved[mode] = true;
5865 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5866 else stream_.userInterleaved = true;
5868 // Set flag for buffer conversion
5869 stream_.doConvertBuffer[mode] = false;
5870 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5871 stream_.doConvertBuffer[mode] = true;
5872 if (stream_.userFormat != stream_.deviceFormat[mode])
5873 stream_.doConvertBuffer[mode] = true;
5874 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5875 stream_.nUserChannels[mode] > 1 )
5876 stream_.doConvertBuffer[mode] = true;
5878 // Allocate necessary internal buffers
5879 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5880 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5881 if ( stream_.userBuffer[mode] == NULL ) {
5882 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5886 if ( stream_.doConvertBuffer[mode] ) {
5888 bool makeBuffer = true;
5889 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5890 if ( mode == INPUT ) {
5891 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5892 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5893 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5898 bufferBytes *= *bufferSize;
5899 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5900 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5901 if ( stream_.deviceBuffer == NULL ) {
5902 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5908 // Allocate our DsHandle structures for the stream.
5909 if ( stream_.apiHandle == 0 ) {
5911 handle = new DsHandle;
5913 catch ( std::bad_alloc& ) {
5914 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5918 // Create a manual-reset event.
5919 handle->condition = CreateEvent( NULL, // no security
5920 TRUE, // manual-reset
5921 FALSE, // non-signaled initially
5923 stream_.apiHandle = (void *) handle;
5926 handle = (DsHandle *) stream_.apiHandle;
5927 handle->id[mode] = ohandle;
5928 handle->buffer[mode] = bhandle;
5929 handle->dsBufferSize[mode] = dsBufferSize;
5930 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5932 stream_.device[mode] = device;
5933 stream_.state = STREAM_STOPPED;
5934 if ( stream_.mode == OUTPUT && mode == INPUT )
5935 // We had already set up an output stream.
5936 stream_.mode = DUPLEX;
5938 stream_.mode = mode;
5939 stream_.nBuffers = nBuffers;
5940 stream_.sampleRate = sampleRate;
5942 // Setup the buffer conversion information structure.
5943 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5945 // Setup the callback thread.
5946 if ( stream_.callbackInfo.isRunning == false ) {
5948 stream_.callbackInfo.isRunning = true;
5949 stream_.callbackInfo.object = (void *) this;
5950 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5951 &stream_.callbackInfo, 0, &threadId );
5952 if ( stream_.callbackInfo.thread == 0 ) {
5953 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5957 // Boost DS thread priority
5958 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5964 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5965 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5966 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5967 if ( buffer ) buffer->Release();
5970 if ( handle->buffer[1] ) {
5971 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5972 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5973 if ( buffer ) buffer->Release();
5976 CloseHandle( handle->condition );
5978 stream_.apiHandle = 0;
5981 for ( int i=0; i<2; i++ ) {
5982 if ( stream_.userBuffer[i] ) {
5983 free( stream_.userBuffer[i] );
5984 stream_.userBuffer[i] = 0;
5988 if ( stream_.deviceBuffer ) {
5989 free( stream_.deviceBuffer );
5990 stream_.deviceBuffer = 0;
5993 stream_.state = STREAM_CLOSED;
5997 void RtApiDs :: closeStream()
5999 if ( stream_.state == STREAM_CLOSED ) {
6000 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
6001 error( RtAudioError::WARNING );
6005 // Stop the callback thread.
6006 stream_.callbackInfo.isRunning = false;
6007 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
6008 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
6010 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6012 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
6013 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
6014 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6021 if ( handle->buffer[1] ) {
6022 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
6023 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6030 CloseHandle( handle->condition );
6032 stream_.apiHandle = 0;
6035 for ( int i=0; i<2; i++ ) {
6036 if ( stream_.userBuffer[i] ) {
6037 free( stream_.userBuffer[i] );
6038 stream_.userBuffer[i] = 0;
6042 if ( stream_.deviceBuffer ) {
6043 free( stream_.deviceBuffer );
6044 stream_.deviceBuffer = 0;
6047 stream_.mode = UNINITIALIZED;
6048 stream_.state = STREAM_CLOSED;
6051 void RtApiDs :: startStream()
6054 if ( stream_.state == STREAM_RUNNING ) {
6055 errorText_ = "RtApiDs::startStream(): the stream is already running!";
6056 error( RtAudioError::WARNING );
6060 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6062 // Increase scheduler frequency on lesser windows (a side-effect of
6063 // increasing timer accuracy). On greater windows (Win2K or later),
6064 // this is already in effect.
6065 timeBeginPeriod( 1 );
6067 buffersRolling = false;
6068 duplexPrerollBytes = 0;
6070 if ( stream_.mode == DUPLEX ) {
6071 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6072 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
6076 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6078 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6079 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6080 if ( FAILED( result ) ) {
6081 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6082 errorText_ = errorStream_.str();
6087 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6089 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6090 result = buffer->Start( DSCBSTART_LOOPING );
6091 if ( FAILED( result ) ) {
6092 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6093 errorText_ = errorStream_.str();
6098 handle->drainCounter = 0;
6099 handle->internalDrain = false;
6100 ResetEvent( handle->condition );
6101 stream_.state = STREAM_RUNNING;
6104 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6107 void RtApiDs :: stopStream()
6110 if ( stream_.state == STREAM_STOPPED ) {
6111 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6112 error( RtAudioError::WARNING );
6119 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6120 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6121 if ( handle->drainCounter == 0 ) {
6122 handle->drainCounter = 2;
6123 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6126 stream_.state = STREAM_STOPPED;
6128 MUTEX_LOCK( &stream_.mutex );
6130 // Stop the buffer and clear memory
6131 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6132 result = buffer->Stop();
6133 if ( FAILED( result ) ) {
6134 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6135 errorText_ = errorStream_.str();
6139 // Lock the buffer and clear it so that if we start to play again,
6140 // we won't have old data playing.
6141 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6142 if ( FAILED( result ) ) {
6143 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6144 errorText_ = errorStream_.str();
6148 // Zero the DS buffer
6149 ZeroMemory( audioPtr, dataLen );
6151 // Unlock the DS buffer
6152 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6153 if ( FAILED( result ) ) {
6154 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6155 errorText_ = errorStream_.str();
6159 // If we start playing again, we must begin at beginning of buffer.
6160 handle->bufferPointer[0] = 0;
6163 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6164 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6168 stream_.state = STREAM_STOPPED;
6170 if ( stream_.mode != DUPLEX )
6171 MUTEX_LOCK( &stream_.mutex );
6173 result = buffer->Stop();
6174 if ( FAILED( result ) ) {
6175 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6176 errorText_ = errorStream_.str();
6180 // Lock the buffer and clear it so that if we start to play again,
6181 // we won't have old data playing.
6182 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6183 if ( FAILED( result ) ) {
6184 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6185 errorText_ = errorStream_.str();
6189 // Zero the DS buffer
6190 ZeroMemory( audioPtr, dataLen );
6192 // Unlock the DS buffer
6193 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6194 if ( FAILED( result ) ) {
6195 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6196 errorText_ = errorStream_.str();
6200 // If we start recording again, we must begin at beginning of buffer.
6201 handle->bufferPointer[1] = 0;
6205 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6206 MUTEX_UNLOCK( &stream_.mutex );
6208 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6211 void RtApiDs :: abortStream()
6214 if ( stream_.state == STREAM_STOPPED ) {
6215 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6216 error( RtAudioError::WARNING );
6220 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6221 handle->drainCounter = 2;
6226 void RtApiDs :: callbackEvent()
6228 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6229 Sleep( 50 ); // sleep 50 milliseconds
6233 if ( stream_.state == STREAM_CLOSED ) {
6234 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6235 error( RtAudioError::WARNING );
6239 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6240 DsHandle *handle = (DsHandle *) stream_.apiHandle;
6242 // Check if we were draining the stream and signal is finished.
6243 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6245 stream_.state = STREAM_STOPPING;
6246 if ( handle->internalDrain == false )
6247 SetEvent( handle->condition );
6253 // Invoke user callback to get fresh output data UNLESS we are
6255 if ( handle->drainCounter == 0 ) {
6256 RtAudioCallback callback = (RtAudioCallback) info->callback;
6257 double streamTime = getStreamTime();
6258 RtAudioStreamStatus status = 0;
6259 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6260 status |= RTAUDIO_OUTPUT_UNDERFLOW;
6261 handle->xrun[0] = false;
6263 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6264 status |= RTAUDIO_INPUT_OVERFLOW;
6265 handle->xrun[1] = false;
6267 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6268 stream_.bufferSize, streamTime, status, info->userData );
6269 if ( cbReturnValue == 2 ) {
6270 stream_.state = STREAM_STOPPING;
6271 handle->drainCounter = 2;
6275 else if ( cbReturnValue == 1 ) {
6276 handle->drainCounter = 1;
6277 handle->internalDrain = true;
6282 DWORD currentWritePointer, safeWritePointer;
6283 DWORD currentReadPointer, safeReadPointer;
6284 UINT nextWritePointer;
6286 LPVOID buffer1 = NULL;
6287 LPVOID buffer2 = NULL;
6288 DWORD bufferSize1 = 0;
6289 DWORD bufferSize2 = 0;
6294 MUTEX_LOCK( &stream_.mutex );
6295 if ( stream_.state == STREAM_STOPPED ) {
6296 MUTEX_UNLOCK( &stream_.mutex );
6300 if ( buffersRolling == false ) {
6301 if ( stream_.mode == DUPLEX ) {
6302 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6304 // It takes a while for the devices to get rolling. As a result,
6305 // there's no guarantee that the capture and write device pointers
6306 // will move in lockstep. Wait here for both devices to start
6307 // rolling, and then set our buffer pointers accordingly.
6308 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6309 // bytes later than the write buffer.
6311 // Stub: a serious risk of having a pre-emptive scheduling round
6312 // take place between the two GetCurrentPosition calls... but I'm
6313 // really not sure how to solve the problem. Temporarily boost to
6314 // Realtime priority, maybe; but I'm not sure what priority the
6315 // DirectSound service threads run at. We *should* be roughly
6316 // within a ms or so of correct.
6318 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6319 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6321 DWORD startSafeWritePointer, startSafeReadPointer;
6323 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6324 if ( FAILED( result ) ) {
6325 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6326 errorText_ = errorStream_.str();
6327 MUTEX_UNLOCK( &stream_.mutex );
6328 error( RtAudioError::SYSTEM_ERROR );
6331 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6332 if ( FAILED( result ) ) {
6333 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6334 errorText_ = errorStream_.str();
6335 MUTEX_UNLOCK( &stream_.mutex );
6336 error( RtAudioError::SYSTEM_ERROR );
6340 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6341 if ( FAILED( result ) ) {
6342 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6343 errorText_ = errorStream_.str();
6344 MUTEX_UNLOCK( &stream_.mutex );
6345 error( RtAudioError::SYSTEM_ERROR );
6348 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6349 if ( FAILED( result ) ) {
6350 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6351 errorText_ = errorStream_.str();
6352 MUTEX_UNLOCK( &stream_.mutex );
6353 error( RtAudioError::SYSTEM_ERROR );
6356 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6360 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6362 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6363 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6364 handle->bufferPointer[1] = safeReadPointer;
6366 else if ( stream_.mode == OUTPUT ) {
6368 // Set the proper nextWritePosition after initial startup.
6369 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6370 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6371 if ( FAILED( result ) ) {
6372 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6373 errorText_ = errorStream_.str();
6374 MUTEX_UNLOCK( &stream_.mutex );
6375 error( RtAudioError::SYSTEM_ERROR );
6378 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6379 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6382 buffersRolling = true;
6385 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6387 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6389 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6390 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6391 bufferBytes *= formatBytes( stream_.userFormat );
6392 memset( stream_.userBuffer[0], 0, bufferBytes );
6395 // Setup parameters and do buffer conversion if necessary.
6396 if ( stream_.doConvertBuffer[0] ) {
6397 buffer = stream_.deviceBuffer;
6398 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6399 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6400 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6403 buffer = stream_.userBuffer[0];
6404 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6405 bufferBytes *= formatBytes( stream_.userFormat );
6408 // No byte swapping necessary in DirectSound implementation.
6410 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6411 // unsigned. So, we need to convert our signed 8-bit data here to
6413 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6414 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6416 DWORD dsBufferSize = handle->dsBufferSize[0];
6417 nextWritePointer = handle->bufferPointer[0];
6419 DWORD endWrite, leadPointer;
6421 // Find out where the read and "safe write" pointers are.
6422 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
6423 if ( FAILED( result ) ) {
6424 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6425 errorText_ = errorStream_.str();
6426 MUTEX_UNLOCK( &stream_.mutex );
6427 error( RtAudioError::SYSTEM_ERROR );
6431 // We will copy our output buffer into the region between
6432 // safeWritePointer and leadPointer. If leadPointer is not
6433 // beyond the next endWrite position, wait until it is.
6434 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6435 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6436 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6437 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6438 endWrite = nextWritePointer + bufferBytes;
6440 // Check whether the entire write region is behind the play pointer.
6441 if ( leadPointer >= endWrite ) break;
6443 // If we are here, then we must wait until the leadPointer advances
6444 // beyond the end of our next write region. We use the
6445 // Sleep() function to suspend operation until that happens.
6446 double millis = ( endWrite - leadPointer ) * 1000.0;
6447 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6448 if ( millis < 1.0 ) millis = 1.0;
6449 Sleep( (DWORD) millis );
6452 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6453 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6454 // We've strayed into the forbidden zone ... resync the read pointer.
6455 handle->xrun[0] = true;
6456 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6457 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6458 handle->bufferPointer[0] = nextWritePointer;
6459 endWrite = nextWritePointer + bufferBytes;
6462 // Lock free space in the buffer
6463 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6464 &bufferSize1, &buffer2, &bufferSize2, 0 );
6465 if ( FAILED( result ) ) {
6466 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6467 errorText_ = errorStream_.str();
6468 MUTEX_UNLOCK( &stream_.mutex );
6469 error( RtAudioError::SYSTEM_ERROR );
6473 // Copy our buffer into the DS buffer
6474 CopyMemory( buffer1, buffer, bufferSize1 );
6475 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6477 // Update our buffer offset and unlock sound buffer
6478 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6479 if ( FAILED( result ) ) {
6480 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6481 errorText_ = errorStream_.str();
6482 MUTEX_UNLOCK( &stream_.mutex );
6483 error( RtAudioError::SYSTEM_ERROR );
6486 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6487 handle->bufferPointer[0] = nextWritePointer;
6490 // Don't bother draining input
6491 if ( handle->drainCounter ) {
6492 handle->drainCounter++;
6496 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6498 // Setup parameters.
6499 if ( stream_.doConvertBuffer[1] ) {
6500 buffer = stream_.deviceBuffer;
6501 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6502 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6505 buffer = stream_.userBuffer[1];
6506 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6507 bufferBytes *= formatBytes( stream_.userFormat );
6510 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6511 long nextReadPointer = handle->bufferPointer[1];
6512 DWORD dsBufferSize = handle->dsBufferSize[1];
6514 // Find out where the write and "safe read" pointers are.
6515 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6516 if ( FAILED( result ) ) {
6517 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6518 errorText_ = errorStream_.str();
6519 MUTEX_UNLOCK( &stream_.mutex );
6520 error( RtAudioError::SYSTEM_ERROR );
6524 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6525 DWORD endRead = nextReadPointer + bufferBytes;
6527 // Handling depends on whether we are INPUT or DUPLEX.
6528 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6529 // then a wait here will drag the write pointers into the forbidden zone.
6531 // In DUPLEX mode, rather than wait, we will back off the read pointer until
6532 // it's in a safe position. This causes dropouts, but it seems to be the only
6533 // practical way to sync up the read and write pointers reliably, given the
6534 // the very complex relationship between phase and increment of the read and write
6537 // In order to minimize audible dropouts in DUPLEX mode, we will
6538 // provide a pre-roll period of 0.5 seconds in which we return
6539 // zeros from the read buffer while the pointers sync up.
6541 if ( stream_.mode == DUPLEX ) {
6542 if ( safeReadPointer < endRead ) {
6543 if ( duplexPrerollBytes <= 0 ) {
6544 // Pre-roll time over. Be more agressive.
6545 int adjustment = endRead-safeReadPointer;
6547 handle->xrun[1] = true;
6549 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6550 // and perform fine adjustments later.
6551 // - small adjustments: back off by twice as much.
6552 if ( adjustment >= 2*bufferBytes )
6553 nextReadPointer = safeReadPointer-2*bufferBytes;
6555 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6557 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6561 // In pre=roll time. Just do it.
6562 nextReadPointer = safeReadPointer - bufferBytes;
6563 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6565 endRead = nextReadPointer + bufferBytes;
6568 else { // mode == INPUT
6569 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6570 // See comments for playback.
6571 double millis = (endRead - safeReadPointer) * 1000.0;
6572 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6573 if ( millis < 1.0 ) millis = 1.0;
6574 Sleep( (DWORD) millis );
6576 // Wake up and find out where we are now.
6577 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
6578 if ( FAILED( result ) ) {
6579 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6580 errorText_ = errorStream_.str();
6581 MUTEX_UNLOCK( &stream_.mutex );
6582 error( RtAudioError::SYSTEM_ERROR );
6586 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6590 // Lock free space in the buffer
6591 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6592 &bufferSize1, &buffer2, &bufferSize2, 0 );
6593 if ( FAILED( result ) ) {
6594 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6595 errorText_ = errorStream_.str();
6596 MUTEX_UNLOCK( &stream_.mutex );
6597 error( RtAudioError::SYSTEM_ERROR );
6601 if ( duplexPrerollBytes <= 0 ) {
6602 // Copy our buffer into the DS buffer
6603 CopyMemory( buffer, buffer1, bufferSize1 );
6604 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6607 memset( buffer, 0, bufferSize1 );
6608 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6609 duplexPrerollBytes -= bufferSize1 + bufferSize2;
6612 // Update our buffer offset and unlock sound buffer
6613 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6614 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6615 if ( FAILED( result ) ) {
6616 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6617 errorText_ = errorStream_.str();
6618 MUTEX_UNLOCK( &stream_.mutex );
6619 error( RtAudioError::SYSTEM_ERROR );
6622 handle->bufferPointer[1] = nextReadPointer;
6624 // No byte swapping necessary in DirectSound implementation.
6626 // If necessary, convert 8-bit data from unsigned to signed.
6627 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6628 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6630 // Do buffer conversion if necessary.
6631 if ( stream_.doConvertBuffer[1] )
6632 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6636 MUTEX_UNLOCK( &stream_.mutex );
6637 RtApi::tickStreamTime();
6640 // Definitions for utility functions and callbacks
6641 // specific to the DirectSound implementation.
6643 static unsigned __stdcall callbackHandler( void *ptr )
6645 CallbackInfo *info = (CallbackInfo *) ptr;
6646 RtApiDs *object = (RtApiDs *) info->object;
6647 bool* isRunning = &info->isRunning;
6649 while ( *isRunning == true ) {
6650 object->callbackEvent();
6657 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6658 LPCTSTR description,
6662 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6663 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6666 bool validDevice = false;
6667 if ( probeInfo.isInput == true ) {
6669 LPDIRECTSOUNDCAPTURE object;
6671 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6672 if ( hr != DS_OK ) return TRUE;
6674 caps.dwSize = sizeof(caps);
6675 hr = object->GetCaps( &caps );
6676 if ( hr == DS_OK ) {
6677 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
6684 LPDIRECTSOUND object;
6685 hr = DirectSoundCreate( lpguid, &object, NULL );
6686 if ( hr != DS_OK ) return TRUE;
6688 caps.dwSize = sizeof(caps);
6689 hr = object->GetCaps( &caps );
6690 if ( hr == DS_OK ) {
6691 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6697 // If good device, then save its name and guid.
6698 std::string name = convertCharPointerToStdString( description );
6699 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
6700 if ( lpguid == NULL )
6701 name = "Default Device";
6702 if ( validDevice ) {
6703 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6704 if ( dsDevices[i].name == name ) {
6705 dsDevices[i].found = true;
6706 if ( probeInfo.isInput ) {
6707 dsDevices[i].id[1] = lpguid;
6708 dsDevices[i].validId[1] = true;
6711 dsDevices[i].id[0] = lpguid;
6712 dsDevices[i].validId[0] = true;
6720 device.found = true;
6721 if ( probeInfo.isInput ) {
6722 device.id[1] = lpguid;
6723 device.validId[1] = true;
6726 device.id[0] = lpguid;
6727 device.validId[0] = true;
6729 dsDevices.push_back( device );
// Map a DirectSound HRESULT error code to a short human-readable string
// used when composing RtAudio error/warning messages.  Unrecognized codes
// fall through to a generic "unknown error" string.
6735 static const char* getErrorString( int code )
6739 case DSERR_ALLOCATED:
6740 return "Already allocated";
6742 case DSERR_CONTROLUNAVAIL:
6743 return "Control unavailable";
6745 case DSERR_INVALIDPARAM:
6746 return "Invalid parameter";
6748 case DSERR_INVALIDCALL:
6749 return "Invalid call";
6752 return "Generic error";
6754 case DSERR_PRIOLEVELNEEDED:
6755 return "Priority level needed";
6757 case DSERR_OUTOFMEMORY:
6758 return "Out of memory";
6760 case DSERR_BADFORMAT:
6761 return "The sample rate or the channel format is not supported";
6763 case DSERR_UNSUPPORTED:
6764 return "Not supported";
6766 case DSERR_NODRIVER:
6769 case DSERR_ALREADYINITIALIZED:
6770 return "Already initialized";
6772 case DSERR_NOAGGREGATION:
6773 return "No aggregation";
6775 case DSERR_BUFFERLOST:
6776 return "Buffer lost";
6778 case DSERR_OTHERAPPHASPRIO:
6779 return "Another application already has priority";
6781 case DSERR_UNINITIALIZED:
6782 return "Uninitialized";
// Default: code not in the table above.
6785 return "DirectSound unknown error";
6788 //******************** End of __WINDOWS_DS__ *********************//
6792 #if defined(__LINUX_ALSA__)
6794 #include <alsa/asoundlib.h>
6797 // A structure to hold various information related to the ALSA API
// implementation.  Stored in stream_.apiHandle by probeDeviceOpen().
// handles[0] is the playback pcm, handles[1] the capture pcm (the same
// 0=output / 1=input convention used for stream_.device[] below).
6800 snd_pcm_t *handles[2];
// Condition variable used to park the callback thread while the stream is
// stopped; signaled from startStream()/closeStream().
6803 pthread_cond_t runnable_cv;
6807 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
// Entry point for the ALSA callback thread (defined later in this file).
6810 static void *alsaCallbackHandler( void * ptr );
// Default constructor: all state is set up lazily in probeDeviceOpen().
6812 RtApiAlsa :: RtApiAlsa()
6814 // Nothing to do here.
// Destructor: make sure any open stream (and its callback thread, pcm
// handles and buffers) is torn down via closeStream().
6817 RtApiAlsa :: ~RtApiAlsa()
6819 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Count the available ALSA PCM devices by walking every sound card
// (snd_card_next) and every PCM subdevice on each card, then adding one
// for the "default" virtual device if it can be opened.  Card/control
// open failures are reported as warnings and the card is skipped.
6822 unsigned int RtApiAlsa :: getDeviceCount( void )
6824 unsigned nDevices = 0;
6825 int result, subdevice, card;
6829 // Count cards and devices
6831 snd_card_next( &card );
6832 while ( card >= 0 ) {
// NOTE(review): sprintf into a fixed-size name buffer; card numbers are
// small in practice but snprintf would be safer — buffer decl is elided here.
6833 sprintf( name, "hw:%d", card );
6834 result = snd_ctl_open( &handle, name, 0 );
6836 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6837 errorText_ = errorStream_.str();
6838 error( RtAudioError::WARNING );
// Iterate the PCM devices of this card; subdevice becomes -1 when done.
6843 result = snd_ctl_pcm_next_device( handle, &subdevice );
6845 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6846 errorText_ = errorStream_.str();
6847 error( RtAudioError::WARNING );
6850 if ( subdevice < 0 )
6855 snd_ctl_close( handle );
6856 snd_card_next( &card );
// Finally, count the "default" device if the default control opens.
6859 result = snd_ctl_open( &handle, "default", 0 );
6862 snd_ctl_close( handle );
// Probe a single ALSA device (identified by the same enumeration order as
// getDeviceCount) and fill an RtAudio::DeviceInfo with its channel counts,
// supported sample rates, native data formats, and name.  Probe failures
// are reported as warnings and return info with info.probed left false.
6868 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6870 RtAudio::DeviceInfo info;
6871 info.probed = false;
6873 unsigned nDevices = 0;
6874 int result, subdevice, card;
6878 // Count cards and devices
// Walk cards/subdevices exactly as getDeviceCount() does so that the
// integer `device` index maps to the same physical device.
6881 snd_card_next( &card );
6882 while ( card >= 0 ) {
6883 sprintf( name, "hw:%d", card );
6884 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6886 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6887 errorText_ = errorStream_.str();
6888 error( RtAudioError::WARNING );
6893 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6895 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6896 errorText_ = errorStream_.str();
6897 error( RtAudioError::WARNING );
6900 if ( subdevice < 0 ) break;
6901 if ( nDevices == device ) {
// Found the requested device: remember its "hw:card,subdevice" name.
6902 sprintf( name, "hw:%d,%d", card, subdevice );
6908 snd_ctl_close( chandle );
6909 snd_card_next( &card );
// The last enumeration slot is the "default" virtual device (subdev -1).
6912 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6913 if ( result == 0 ) {
6914 if ( nDevices == device ) {
6915 strcpy( name, "default" );
6921 if ( nDevices == 0 ) {
6922 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6923 error( RtAudioError::INVALID_USE );
6927 if ( device >= nDevices ) {
6928 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6929 error( RtAudioError::INVALID_USE );
6935 // If a stream is already open, we cannot probe the stream devices.
6936 // Thus, use the saved results.
6937 if ( stream_.state != STREAM_CLOSED &&
6938 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6939 snd_ctl_close( chandle );
6940 if ( device >= devices_.size() ) {
6941 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6942 error( RtAudioError::WARNING );
// Return the snapshot cached by saveDeviceInfo() at stream-open time.
6945 return devices_[ device ];
6948 int openMode = SND_PCM_ASYNC;
6949 snd_pcm_stream_t stream;
6950 snd_pcm_info_t *pcminfo;
// alloca-style allocation: pcminfo/params live on the stack, no free needed.
6951 snd_pcm_info_alloca( &pcminfo );
6953 snd_pcm_hw_params_t *params;
6954 snd_pcm_hw_params_alloca( &params );
6956 // First try for playback unless default device (which has subdev -1)
6957 stream = SND_PCM_STREAM_PLAYBACK;
6958 snd_pcm_info_set_stream( pcminfo, stream );
6959 if ( subdevice != -1 ) {
6960 snd_pcm_info_set_device( pcminfo, subdevice );
6961 snd_pcm_info_set_subdevice( pcminfo, 0 );
6963 result = snd_ctl_pcm_info( chandle, pcminfo );
6965 // Device probably doesn't support playback.
6970 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6972 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6973 errorText_ = errorStream_.str();
6974 error( RtAudioError::WARNING );
6978 // The device is open ... fill the parameter structure.
6979 result = snd_pcm_hw_params_any( phandle, params );
6981 snd_pcm_close( phandle );
6982 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6983 errorText_ = errorStream_.str();
6984 error( RtAudioError::WARNING );
6988 // Get output channel information.
6990 result = snd_pcm_hw_params_get_channels_max( params, &value );
6992 snd_pcm_close( phandle );
6993 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
6994 errorText_ = errorStream_.str();
6995 error( RtAudioError::WARNING );
6998 info.outputChannels = value;
6999 snd_pcm_close( phandle );
// Repeat the probe for the capture direction.
7002 stream = SND_PCM_STREAM_CAPTURE;
7003 snd_pcm_info_set_stream( pcminfo, stream );
7005 // Now try for capture unless default device (with subdev = -1)
7006 if ( subdevice != -1 ) {
7007 result = snd_ctl_pcm_info( chandle, pcminfo );
7008 snd_ctl_close( chandle );
7010 // Device probably doesn't support capture.
// If it supports neither direction, give up; otherwise go test
// rates/formats using the direction that did work.
7011 if ( info.outputChannels == 0 ) return info;
7012 goto probeParameters;
7016 snd_ctl_close( chandle );
7018 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7020 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7021 errorText_ = errorStream_.str();
7022 error( RtAudioError::WARNING );
7023 if ( info.outputChannels == 0 ) return info;
7024 goto probeParameters;
7027 // The device is open ... fill the parameter structure.
7028 result = snd_pcm_hw_params_any( phandle, params );
7030 snd_pcm_close( phandle );
7031 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7032 errorText_ = errorStream_.str();
7033 error( RtAudioError::WARNING );
7034 if ( info.outputChannels == 0 ) return info;
7035 goto probeParameters;
7038 result = snd_pcm_hw_params_get_channels_max( params, &value );
7040 snd_pcm_close( phandle );
7041 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
7042 errorText_ = errorStream_.str();
7043 error( RtAudioError::WARNING );
7044 if ( info.outputChannels == 0 ) return info;
7045 goto probeParameters;
7047 info.inputChannels = value;
7048 snd_pcm_close( phandle );
7050 // If device opens for both playback and capture, we determine the channels.
7051 if ( info.outputChannels > 0 && info.inputChannels > 0 )
7052 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
7054 // ALSA doesn't provide default devices so we'll use the first available one.
7055 if ( device == 0 && info.outputChannels > 0 )
7056 info.isDefaultOutput = true;
7057 if ( device == 0 && info.inputChannels > 0 )
7058 info.isDefaultInput = true;
7061 // At this point, we just need to figure out the supported data
7062 // formats and sample rates. We'll proceed by opening the device in
7063 // the direction with the maximum number of channels, or playback if
7064 // they are equal. This might limit our sample rate options, but so
7067 if ( info.outputChannels >= info.inputChannels )
7068 stream = SND_PCM_STREAM_PLAYBACK;
7070 stream = SND_PCM_STREAM_CAPTURE;
7071 snd_pcm_info_set_stream( pcminfo, stream );
7073 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7075 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7076 errorText_ = errorStream_.str();
7077 error( RtAudioError::WARNING );
7081 // The device is open ... fill the parameter structure.
7082 result = snd_pcm_hw_params_any( phandle, params );
7084 snd_pcm_close( phandle );
7085 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7086 errorText_ = errorStream_.str();
7087 error( RtAudioError::WARNING );
7091 // Test our discrete set of sample rate values.
7092 info.sampleRates.clear();
7093 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7094 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7095 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Prefer the highest supported rate that does not exceed 48 kHz.
7097 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7098 info.preferredSampleRate = SAMPLE_RATES[i];
7101 if ( info.sampleRates.size() == 0 ) {
7102 snd_pcm_close( phandle );
7103 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7104 errorText_ = errorStream_.str();
7105 error( RtAudioError::WARNING );
7109 // Probe the supported data formats ... we don't care about endian-ness just yet
7110 snd_pcm_format_t format;
7111 info.nativeFormats = 0;
7112 format = SND_PCM_FORMAT_S8;
7113 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7114 info.nativeFormats |= RTAUDIO_SINT8;
7115 format = SND_PCM_FORMAT_S16;
7116 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7117 info.nativeFormats |= RTAUDIO_SINT16;
7118 format = SND_PCM_FORMAT_S24;
7119 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7120 info.nativeFormats |= RTAUDIO_SINT24;
7121 format = SND_PCM_FORMAT_S32;
7122 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7123 info.nativeFormats |= RTAUDIO_SINT32;
7124 format = SND_PCM_FORMAT_FLOAT;
7125 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7126 info.nativeFormats |= RTAUDIO_FLOAT32;
7127 format = SND_PCM_FORMAT_FLOAT64;
7128 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7129 info.nativeFormats |= RTAUDIO_FLOAT64;
7131 // Check that we have at least one supported format
7132 if ( info.nativeFormats == 0 ) {
7133 snd_pcm_close( phandle );
7134 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7135 errorText_ = errorStream_.str();
7136 error( RtAudioError::WARNING );
7140 // Get the device name
7142 result = snd_card_get_name( card, &cardname );
7143 if ( result >= 0 ) {
// NOTE(review): cardname is allocated by snd_card_get_name; the matching
// free() is presumably in the elided lines — verify against upstream.
7144 sprintf( name, "hw:%s,%d", cardname, subdevice );
7149 // That's all ... close the device and return
7150 snd_pcm_close( phandle );
// Snapshot every device's DeviceInfo into devices_.  Called from
// probeDeviceOpen() before a stream opens, because getDeviceInfo() cannot
// probe a device that is held open by the stream itself.
7155 void RtApiAlsa :: saveDeviceInfo( void )
7159 unsigned int nDevices = getDeviceCount();
7160 devices_.resize( nDevices );
7161 for ( unsigned int i=0; i<nDevices; i++ )
7162 devices_[i] = getDeviceInfo( i );
// Open one direction (OUTPUT or INPUT) of an ALSA stream on `device`:
// resolve the device name, negotiate access mode / sample format / rate /
// channel count / period size with snd_pcm_hw_params, install sw params
// (silence-fill so xruns don't stop the device), allocate the user and
// conversion buffers, and — on the first direction opened — spawn the
// callback thread.  Returns true on success; on any failure it cleans up
// (see the error path at the end) and returns false with errorText_ set.
// For a duplex stream this is called twice (OUTPUT first, then INPUT) and
// the second call links the two pcm handles when possible.
7165 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7166 unsigned int firstChannel, unsigned int sampleRate,
7167 RtAudioFormat format, unsigned int *bufferSize,
7168 RtAudio::StreamOptions *options )
7171 #if defined(__RTAUDIO_DEBUG__)
7173 snd_output_stdio_attach(&out, stderr, 0);
7176 // I'm not using the "plug" interface ... too much inconsistent behavior.
7178 unsigned nDevices = 0;
7179 int result, subdevice, card;
// User may force the ALSA "default" virtual device via a stream option.
7183 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7184 snprintf(name, sizeof(name), "%s", "default");
7186 // Count cards and devices
// Same card/subdevice walk as getDeviceCount()/getDeviceInfo() so the
// integer `device` index maps to the same "hw:card,subdevice" name.
7188 snd_card_next( &card );
7189 while ( card >= 0 ) {
7190 sprintf( name, "hw:%d", card );
7191 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7193 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7194 errorText_ = errorStream_.str();
7199 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7200 if ( result < 0 ) break;
7201 if ( subdevice < 0 ) break;
7202 if ( nDevices == device ) {
7203 sprintf( name, "hw:%d,%d", card, subdevice );
7204 snd_ctl_close( chandle );
7209 snd_ctl_close( chandle );
7210 snd_card_next( &card );
7213 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7214 if ( result == 0 ) {
7215 if ( nDevices == device ) {
7216 strcpy( name, "default" );
7222 if ( nDevices == 0 ) {
7223 // This should not happen because a check is made before this function is called.
7224 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7228 if ( device >= nDevices ) {
7229 // This should not happen because a check is made before this function is called.
7230 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7237 // The getDeviceInfo() function will not work for a device that is
7238 // already open. Thus, we'll probe the system before opening a
7239 // stream and save the results for use by getDeviceInfo().
7240 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7241 this->saveDeviceInfo();
7243 snd_pcm_stream_t stream;
7244 if ( mode == OUTPUT )
7245 stream = SND_PCM_STREAM_PLAYBACK;
7247 stream = SND_PCM_STREAM_CAPTURE;
7250 int openMode = SND_PCM_ASYNC;
7251 result = snd_pcm_open( &phandle, name, stream, openMode );
7253 if ( mode == OUTPUT )
7254 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7256 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7257 errorText_ = errorStream_.str();
7261 // Fill the parameter structure.
7262 snd_pcm_hw_params_t *hw_params;
7263 snd_pcm_hw_params_alloca( &hw_params );
7264 result = snd_pcm_hw_params_any( phandle, hw_params );
7266 snd_pcm_close( phandle );
7267 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7268 errorText_ = errorStream_.str();
7272 #if defined(__RTAUDIO_DEBUG__)
7273 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7274 snd_pcm_hw_params_dump( hw_params, out );
7277 // Set access ... check user preference.
// Try the user's preferred interleaving first; if the device refuses,
// fall back to the opposite layout and let the conversion buffer adapt.
7278 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7279 stream_.userInterleaved = false;
7280 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7282 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7283 stream_.deviceInterleaved[mode] = true;
7286 stream_.deviceInterleaved[mode] = false;
7289 stream_.userInterleaved = true;
7290 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7292 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7293 stream_.deviceInterleaved[mode] = false;
7296 stream_.deviceInterleaved[mode] = true;
7300 snd_pcm_close( phandle );
7301 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7302 errorText_ = errorStream_.str();
7306 // Determine how to set the device format.
7307 stream_.userFormat = format;
7308 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7310 if ( format == RTAUDIO_SINT8 )
7311 deviceFormat = SND_PCM_FORMAT_S8;
7312 else if ( format == RTAUDIO_SINT16 )
7313 deviceFormat = SND_PCM_FORMAT_S16;
7314 else if ( format == RTAUDIO_SINT24 )
7315 deviceFormat = SND_PCM_FORMAT_S24;
7316 else if ( format == RTAUDIO_SINT32 )
7317 deviceFormat = SND_PCM_FORMAT_S32;
7318 else if ( format == RTAUDIO_FLOAT32 )
7319 deviceFormat = SND_PCM_FORMAT_FLOAT;
7320 else if ( format == RTAUDIO_FLOAT64 )
7321 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7323 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7324 stream_.deviceFormat[mode] = format;
7328 // The user requested format is not natively supported by the device.
// Search for the "best" substitute, widest/most precise first; the
// buffer-conversion machinery translates to/from the user format.
7329 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7330 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7331 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7335 deviceFormat = SND_PCM_FORMAT_FLOAT;
7336 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7337 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7341 deviceFormat = SND_PCM_FORMAT_S32;
7342 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7343 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7347 deviceFormat = SND_PCM_FORMAT_S24;
7348 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7349 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7353 deviceFormat = SND_PCM_FORMAT_S16;
7354 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7355 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7359 deviceFormat = SND_PCM_FORMAT_S8;
7360 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7361 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7365 // If we get here, no supported format was found.
7366 snd_pcm_close( phandle );
7367 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7368 errorText_ = errorStream_.str();
7372 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7374 snd_pcm_close( phandle );
7375 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7376 errorText_ = errorStream_.str();
7380 // Determine whether byte-swapping is necessary.
7381 stream_.doByteSwap[mode] = false;
7382 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7383 result = snd_pcm_format_cpu_endian( deviceFormat );
7385 stream_.doByteSwap[mode] = true;
7386 else if (result < 0) {
7387 snd_pcm_close( phandle );
7388 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7389 errorText_ = errorStream_.str();
7394 // Set the sample rate.
// _near may adjust sampleRate; the value actually granted is saved into
// stream_.sampleRate further below.
7395 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7397 snd_pcm_close( phandle );
7398 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7399 errorText_ = errorStream_.str();
7403 // Determine the number of channels for this device. We support a possible
7404 // minimum device channel number > than the value requested by the user.
7405 stream_.nUserChannels[mode] = channels;
7407 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7408 unsigned int deviceChannels = value;
7409 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7410 snd_pcm_close( phandle );
7411 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7412 errorText_ = errorStream_.str();
7416 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7418 snd_pcm_close( phandle );
7419 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7420 errorText_ = errorStream_.str();
7423 deviceChannels = value;
7424 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7425 stream_.nDeviceChannels[mode] = deviceChannels;
7427 // Set the device channels.
7428 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7430 snd_pcm_close( phandle );
7431 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7432 errorText_ = errorStream_.str();
7436 // Set the buffer (or period) size.
7438 snd_pcm_uframes_t periodSize = *bufferSize;
7439 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7441 snd_pcm_close( phandle );
7442 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7443 errorText_ = errorStream_.str();
// Report the granted period size back to the caller.
7446 *bufferSize = periodSize;
7448 // Set the buffer number, which in ALSA is referred to as the "period".
7449 unsigned int periods = 0;
7450 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7451 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7452 if ( periods < 2 ) periods = 4; // a fairly safe default value
7453 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7455 snd_pcm_close( phandle );
7456 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7457 errorText_ = errorStream_.str();
7461 // If attempting to setup a duplex stream, the bufferSize parameter
7462 // MUST be the same in both directions!
7463 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7464 snd_pcm_close( phandle );
7465 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7466 errorText_ = errorStream_.str();
7470 stream_.bufferSize = *bufferSize;
7472 // Install the hardware configuration
7473 result = snd_pcm_hw_params( phandle, hw_params );
7475 snd_pcm_close( phandle );
7476 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7477 errorText_ = errorStream_.str();
7481 #if defined(__RTAUDIO_DEBUG__)
7482 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7483 snd_pcm_hw_params_dump( hw_params, out );
7486 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7487 snd_pcm_sw_params_t *sw_params = NULL;
7488 snd_pcm_sw_params_alloca( &sw_params );
7489 snd_pcm_sw_params_current( phandle, sw_params );
7490 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
// ULONG_MAX stop threshold: the device keeps running through underruns.
7491 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7492 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7494 // The following two settings were suggested by Theo Veenker
7495 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7496 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7498 // here are two options for a fix
7499 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
// Silence the whole ring buffer (up to the boundary) on xrun.
7500 snd_pcm_uframes_t val;
7501 snd_pcm_sw_params_get_boundary( sw_params, &val );
7502 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7504 result = snd_pcm_sw_params( phandle, sw_params );
7506 snd_pcm_close( phandle );
7507 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7508 errorText_ = errorStream_.str();
7512 #if defined(__RTAUDIO_DEBUG__)
7513 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7514 snd_pcm_sw_params_dump( sw_params, out );
7517 // Set flags for buffer conversion
// Conversion is needed when user and device disagree on format, channel
// count, or (for multi-channel streams) interleaving.
7518 stream_.doConvertBuffer[mode] = false;
7519 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7520 stream_.doConvertBuffer[mode] = true;
7521 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7522 stream_.doConvertBuffer[mode] = true;
7523 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7524 stream_.nUserChannels[mode] > 1 )
7525 stream_.doConvertBuffer[mode] = true;
7527 // Allocate the ApiHandle if necessary and then save.
7528 AlsaHandle *apiInfo = 0;
7529 if ( stream_.apiHandle == 0 ) {
7531 apiInfo = (AlsaHandle *) new AlsaHandle;
7533 catch ( std::bad_alloc& ) {
7534 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7538 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7539 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7543 stream_.apiHandle = (void *) apiInfo;
7544 apiInfo->handles[0] = 0;
7545 apiInfo->handles[1] = 0;
// Second direction of a duplex open: reuse the existing handle struct.
7548 apiInfo = (AlsaHandle *) stream_.apiHandle;
7550 apiInfo->handles[mode] = phandle;
7553 // Allocate necessary internal buffers.
7554 unsigned long bufferBytes;
7555 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7556 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7557 if ( stream_.userBuffer[mode] == NULL ) {
7558 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7562 if ( stream_.doConvertBuffer[mode] ) {
7564 bool makeBuffer = true;
7565 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
7566 if ( mode == INPUT ) {
// Reuse the output-side device buffer if it is already big enough.
7567 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7568 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7569 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7574 bufferBytes *= *bufferSize;
7575 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7576 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7577 if ( stream_.deviceBuffer == NULL ) {
7578 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7584 stream_.sampleRate = sampleRate;
7585 stream_.nBuffers = periods;
7586 stream_.device[mode] = device;
7587 stream_.state = STREAM_STOPPED;
7589 // Setup the buffer conversion information structure.
7590 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7592 // Setup thread if necessary.
7593 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7594 // We had already set up an output stream.
7595 stream_.mode = DUPLEX;
7596 // Link the streams if possible.
// snd_pcm_link makes start/stop atomic across both directions.
7597 apiInfo->synchronized = false;
7598 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7599 apiInfo->synchronized = true;
7601 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7602 error( RtAudioError::WARNING );
7606 stream_.mode = mode;
7608 // Setup callback thread.
7609 stream_.callbackInfo.object = (void *) this;
7611 // Set the thread attributes for joinable and realtime scheduling
7612 // priority (optional). The higher priority will only take effect
7613 // if the program is run as root or suid. Note, under Linux
7614 // processes with CAP_SYS_NICE privilege, a user can change
7615 // scheduling policy and priority (thus need not be root). See
7616 // POSIX "capabilities".
7617 pthread_attr_t attr;
7618 pthread_attr_init( &attr );
7619 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7620 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7621 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7622 stream_.callbackInfo.doRealtime = true;
7623 struct sched_param param;
7624 int priority = options->priority;
7625 int min = sched_get_priority_min( SCHED_RR );
7626 int max = sched_get_priority_max( SCHED_RR );
// Clamp the requested priority into the legal SCHED_RR range.
7627 if ( priority < min ) priority = min;
7628 else if ( priority > max ) priority = max;
7629 param.sched_priority = priority;
7631 // Set the policy BEFORE the priority. Otherwise it fails.
7632 pthread_attr_setschedpolicy(&attr, SCHED_RR);
7633 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
7634 // This is definitely required. Otherwise it fails.
7635 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
7636 pthread_attr_setschedparam(&attr, &param);
7639 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7641 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
7644 stream_.callbackInfo.isRunning = true;
7645 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7646 pthread_attr_destroy( &attr );
7648 // Failed. Try instead with default attributes.
7649 result = pthread_create( &stream_.callbackInfo.thread, NULL, alsaCallbackHandler, &stream_.callbackInfo );
7651 stream_.callbackInfo.isRunning = false;
// NOTE(review): message lacks the usual "probeDeviceOpen:" prefix —
// presumably a typo in the original; left as-is (runtime string).
7652 errorText_ = "RtApiAlsa::error creating callback thread!";
// Shared error/cleanup path: release handles, buffers, and reset state.
7662 pthread_cond_destroy( &apiInfo->runnable_cv );
7663 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7664 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7666 stream_.apiHandle = 0;
7669 if ( phandle) snd_pcm_close( phandle );
7671 for ( int i=0; i<2; i++ ) {
7672 if ( stream_.userBuffer[i] ) {
7673 free( stream_.userBuffer[i] );
7674 stream_.userBuffer[i] = 0;
7678 if ( stream_.deviceBuffer ) {
7679 free( stream_.deviceBuffer );
7680 stream_.deviceBuffer = 0;
7683 stream_.state = STREAM_CLOSED;
// Close the open stream: stop the callback thread (waking it if it is
// parked on runnable_cv), drop any in-flight pcm data, close both pcm
// handles, free the handle struct and all buffers, and reset stream state.
7687 void RtApiAlsa :: closeStream()
7689 if ( stream_.state == STREAM_CLOSED ) {
7690 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7691 error( RtAudioError::WARNING );
7695 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Tell the callback thread to exit its loop ...
7696 stream_.callbackInfo.isRunning = false;
7697 MUTEX_LOCK( &stream_.mutex );
7698 if ( stream_.state == STREAM_STOPPED ) {
// ... and wake it if it is blocked waiting to become runnable.
7699 apiInfo->runnable = true;
7700 pthread_cond_signal( &apiInfo->runnable_cv );
7702 MUTEX_UNLOCK( &stream_.mutex );
// Wait for the callback thread to terminate before tearing down handles.
7703 pthread_join( stream_.callbackInfo.thread, NULL );
7705 if ( stream_.state == STREAM_RUNNING ) {
7706 stream_.state = STREAM_STOPPED;
// Abort playback/capture immediately (handles[0]=output, [1]=input).
7707 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7708 snd_pcm_drop( apiInfo->handles[0] );
7709 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7710 snd_pcm_drop( apiInfo->handles[1] );
7714 pthread_cond_destroy( &apiInfo->runnable_cv );
7715 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7716 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7718 stream_.apiHandle = 0;
7721 for ( int i=0; i<2; i++ ) {
7722 if ( stream_.userBuffer[i] ) {
7723 free( stream_.userBuffer[i] );
7724 stream_.userBuffer[i] = 0;
7728 if ( stream_.deviceBuffer ) {
7729 free( stream_.deviceBuffer );
7730 stream_.deviceBuffer = 0;
7733 stream_.mode = UNINITIALIZED;
7734 stream_.state = STREAM_CLOSED;
// Start a stopped stream: prepare the pcm device(s) if needed, mark the
// stream RUNNING, and signal runnable_cv so the parked callback thread
// resumes.  Errors during prepare are reported via error() after unlock.
7737 void RtApiAlsa :: startStream()
7739 // This method calls snd_pcm_prepare if the device isn't already in that state.
7742 if ( stream_.state == STREAM_RUNNING ) {
7743 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7744 error( RtAudioError::WARNING );
7748 MUTEX_LOCK( &stream_.mutex );
7751 snd_pcm_state_t state;
7752 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7753 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7754 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7755 state = snd_pcm_state( handle[0] );
7756 if ( state != SND_PCM_STATE_PREPARED ) {
7757 result = snd_pcm_prepare( handle[0] );
7759 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7760 errorText_ = errorStream_.str();
// When the handles are linked (synchronized), preparing the output side
// covers the input too; only prepare input separately when unlinked.
7766 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7767 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7768 state = snd_pcm_state( handle[1] );
7769 if ( state != SND_PCM_STATE_PREPARED ) {
7770 result = snd_pcm_prepare( handle[1] );
7772 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7773 errorText_ = errorStream_.str();
7779 stream_.state = STREAM_RUNNING;
// Wake the callback thread parked on runnable_cv.
7782 apiInfo->runnable = true;
7783 pthread_cond_signal( &apiInfo->runnable_cv );
7784 MUTEX_UNLOCK( &stream_.mutex );
7786 if ( result >= 0 ) return;
7787 error( RtAudioError::SYSTEM_ERROR );
// Stop the ALSA stream gracefully: drain playback (or drop when the handles
// are synchronized), drop capture, and park the callback thread.
// NOTE(review): listing has gaps — error branches/returns are partly omitted.
7790 void RtApiAlsa :: stopStream()
7793 if ( stream_.state == STREAM_STOPPED ) {
7794 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7795 error( RtAudioError::WARNING );
// State is flipped before taking the mutex so the callback thread sees it.
7799 stream_.state = STREAM_STOPPED;
7800 MUTEX_LOCK( &stream_.mutex );
7803 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7804 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7805 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Linked (synchronized) handles: drop immediately; otherwise drain pending audio.
7806 if ( apiInfo->synchronized )
7807 result = snd_pcm_drop( handle[0] );
7809 result = snd_pcm_drain( handle[0] );
7811 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7812 errorText_ = errorStream_.str();
7817 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7818 result = snd_pcm_drop( handle[1] );
7820 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7821 errorText_ = errorStream_.str();
7827 apiInfo->runnable = false; // fixes high CPU usage when stopped
7828 MUTEX_UNLOCK( &stream_.mutex );
7830 if ( result >= 0 ) return;
7831 error( RtAudioError::SYSTEM_ERROR );
// Abort the ALSA stream: like stopStream() but always snd_pcm_drop()s the
// playback handle (discarding queued samples) instead of draining it.
// NOTE(review): listing has gaps — error branches/returns are partly omitted.
7834 void RtApiAlsa :: abortStream()
7837 if ( stream_.state == STREAM_STOPPED ) {
7838 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7839 error( RtAudioError::WARNING );
7843 stream_.state = STREAM_STOPPED;
7844 MUTEX_LOCK( &stream_.mutex );
7847 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7848 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7849 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7850 result = snd_pcm_drop( handle[0] );
7852 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7853 errorText_ = errorStream_.str();
7858 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7859 result = snd_pcm_drop( handle[1] );
7861 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7862 errorText_ = errorStream_.str();
7868 apiInfo->runnable = false; // fixes high CPU usage when stopped
7869 MUTEX_UNLOCK( &stream_.mutex );
7871 if ( result >= 0 ) return;
7872 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the ALSA callback loop: wait while stopped, invoke the
// user callback, then read from capture (handle[1]) and write to playback
// (handle[0]), handling xrun (-EPIPE) recovery and latency queries.
// NOTE(review): this numbered listing omits many lines (braces, returns,
// `goto unlock` targets, declarations of result/buffer/channels) — do not
// assume the visible lines are contiguous.
7875 void RtApiAlsa :: callbackEvent()
7877 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Block here (instead of spinning) until startStream() signals runnable_cv.
7878 if ( stream_.state == STREAM_STOPPED ) {
7879 MUTEX_LOCK( &stream_.mutex );
7880 while ( !apiInfo->runnable )
7881 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7883 if ( stream_.state != STREAM_RUNNING ) {
7884 MUTEX_UNLOCK( &stream_.mutex );
7887 MUTEX_UNLOCK( &stream_.mutex );
7890 if ( stream_.state == STREAM_CLOSED ) {
7891 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7892 error( RtAudioError::WARNING );
// Report any xrun flagged by the previous cycle to the user callback.
7896 int doStopStream = 0;
7897 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7898 double streamTime = getStreamTime();
7899 RtAudioStreamStatus status = 0;
7900 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7901 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7902 apiInfo->xrun[0] = false;
7904 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7905 status |= RTAUDIO_INPUT_OVERFLOW;
7906 apiInfo->xrun[1] = false;
7908 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7909 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// Return value 2 requests an immediate abort (handled in omitted lines).
7911 if ( doStopStream == 2 ) {
7916 MUTEX_LOCK( &stream_.mutex );
7918 // The state might change while waiting on a mutex.
7919 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7925 snd_pcm_sframes_t frames;
7926 RtAudioFormat format;
7927 handle = (snd_pcm_t **) apiInfo->handles;
// ---- Capture side ----
7929 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7931 // Setup parameters.
// Read into the device buffer when a format/channel conversion is needed.
7932 if ( stream_.doConvertBuffer[1] ) {
7933 buffer = stream_.deviceBuffer;
7934 channels = stream_.nDeviceChannels[1];
7935 format = stream_.deviceFormat[1];
7938 buffer = stream_.userBuffer[1];
7939 channels = stream_.nUserChannels[1];
7940 format = stream_.userFormat;
7943 // Read samples from device in interleaved/non-interleaved format.
7944 if ( stream_.deviceInterleaved[1] )
7945 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
// Non-interleaved: build per-channel pointer array into the flat buffer.
7947 void *bufs[channels];
7948 size_t offset = stream_.bufferSize * formatBytes( format );
7949 for ( int i=0; i<channels; i++ )
7950 bufs[i] = (void *) (buffer + (i * offset));
7951 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
7954 if ( result < (int) stream_.bufferSize ) {
7955 // Either an error or overrun occured.
// -EPIPE signals an xrun; re-prepare the device to keep streaming.
7956 if ( result == -EPIPE ) {
7957 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7958 if ( state == SND_PCM_STATE_XRUN ) {
7959 apiInfo->xrun[1] = true;
7960 result = snd_pcm_prepare( handle[1] );
7962 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7963 errorText_ = errorStream_.str();
7967 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7968 errorText_ = errorStream_.str();
7972 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7973 errorText_ = errorStream_.str();
7975 error( RtAudioError::WARNING );
7979 // Do byte swapping if necessary.
7980 if ( stream_.doByteSwap[1] )
7981 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
7983 // Do buffer conversion if necessary.
7984 if ( stream_.doConvertBuffer[1] )
7985 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7987 // Check stream latency
7988 result = snd_pcm_delay( handle[1], &frames );
7989 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
// ---- Playback side ----
7994 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7996 // Setup parameters and do buffer conversion if necessary.
7997 if ( stream_.doConvertBuffer[0] ) {
7998 buffer = stream_.deviceBuffer;
7999 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
8000 channels = stream_.nDeviceChannels[0];
8001 format = stream_.deviceFormat[0];
8004 buffer = stream_.userBuffer[0];
8005 channels = stream_.nUserChannels[0];
8006 format = stream_.userFormat;
8009 // Do byte swapping if necessary.
8010 if ( stream_.doByteSwap[0] )
8011 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
8013 // Write samples to device in interleaved/non-interleaved format.
8014 if ( stream_.deviceInterleaved[0] )
8015 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
8017 void *bufs[channels];
8018 size_t offset = stream_.bufferSize * formatBytes( format );
8019 for ( int i=0; i<channels; i++ )
8020 bufs[i] = (void *) (buffer + (i * offset));
8021 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
8024 if ( result < (int) stream_.bufferSize ) {
8025 // Either an error or underrun occured.
8026 if ( result == -EPIPE ) {
8027 snd_pcm_state_t state = snd_pcm_state( handle[0] );
8028 if ( state == SND_PCM_STATE_XRUN ) {
8029 apiInfo->xrun[0] = true;
8030 result = snd_pcm_prepare( handle[0] );
8032 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
8033 errorText_ = errorStream_.str();
8036 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
8039 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
8040 errorText_ = errorStream_.str();
8044 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
8045 errorText_ = errorStream_.str();
8047 error( RtAudioError::WARNING );
8051 // Check stream latency
8052 result = snd_pcm_delay( handle[0], &frames );
8053 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
8057 MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream clock; honor a callback-requested stop (return value 1).
8059 RtApi::tickStreamTime();
8060 if ( doStopStream == 1 ) this->stopStream();
// Thread entry point for the ALSA callback thread: loops calling
// callbackEvent() (with a cancellation point each pass) until the
// CallbackInfo::isRunning flag is cleared (e.g. by closeStream()).
8063 static void *alsaCallbackHandler( void *ptr )
8065 CallbackInfo *info = (CallbackInfo *) ptr;
8066 RtApiAlsa *object = (RtApiAlsa *) info->object;
8067 bool *isRunning = &info->isRunning;
8069 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether SCHED_RR actually took effect.
8070 if ( info->doRealtime ) {
8071 std::cerr << "RtAudio alsa: " <<
8072 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8073 "running realtime scheduling" << std::endl;
8077 while ( *isRunning == true ) {
8078 pthread_testcancel();
8079 object->callbackEvent();
8082 pthread_exit( NULL );
8085 //******************** End of __LINUX_ALSA__ *********************//
8088 #if defined(__LINUX_PULSE__)
8090 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8091 // and Tristan Matthews.
8093 #include <pulse/error.h>
8094 #include <pulse/simple.h>
// Zero-terminated list of sample rates the PulseAudio backend advertises;
// iterated by getDeviceInfo() and probeDeviceOpen() below.
8097 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8098 44100, 48000, 96000, 0};
// Pairs an RtAudio sample format with its PulseAudio equivalent; used to
// build the supported_sampleformats translation table below.
8100 struct rtaudio_pa_format_mapping_t {
8101 RtAudioFormat rtaudio_format;
8102 pa_sample_format_t pa_format;
// RtAudio -> PulseAudio format translation table, terminated by the
// {0, PA_SAMPLE_INVALID} sentinel (scanned in probeDeviceOpen()).
8105 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8106 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8107 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8108 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8109 {0, PA_SAMPLE_INVALID}};
// Per-stream state for the PulseAudio backend. The constructor's initializer
// list shows members s_play, s_rec and runnable exist; their declarations
// (and the thread member used by closeStream()) are in lines omitted from
// this listing.
8111 struct PulseAudioHandle {
8115 pthread_cond_t runnable_cv;
8117 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
// Destructor: closes the stream if it is still open (call on the omitted
// line presumably invokes closeStream() — not visible in this listing).
8120 RtApiPulse::~RtApiPulse()
8122 if ( stream_.state != STREAM_CLOSED )
// Device count for the PulseAudio simple backend; the body is omitted from
// this listing (getDeviceInfo() below treats device 0 as the only device).
8131 unsigned int RtApiPulse::getDeviceCount( void )
// Return a fixed descriptor for the single virtual "PulseAudio" device:
// stereo in/out, the SUPPORTED_SAMPLERATES list, 48 kHz preferred, and the
// three natively supported formats (S16/S32/F32).
8133 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8135 RtAudio::DeviceInfo info;
8136 info.name = "PulseAudio";
8137 info.outputChannels = 2;
8138 info.inputChannels = 2;
8139 info.duplexChannels = 2;
8140 info.isDefaultOutput = true;
8141 info.isDefaultInput = true;
8143 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8144 info.sampleRates.push_back( *sr );
8146 info.preferredSampleRate = 48000;
8147 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
// Thread entry point for the PulseAudio callback thread; mirrors
// alsaCallbackHandler(): loop on callbackEvent() until isRunning clears.
8152 static void *pulseaudio_callback( void * user )
8154 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8155 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8156 volatile bool *isRunning = &cbi->isRunning;
8158 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether SCHED_RR scheduling actually took effect.
8159 if (cbi->doRealtime) {
8160 std::cerr << "RtAudio pulse: " <<
8161 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
8162 "running realtime scheduling" << std::endl;
8166 while ( *isRunning ) {
8167 pthread_testcancel();
8168 context->callbackEvent();
8171 pthread_exit( NULL );
// Tear down the PulseAudio stream: stop the callback thread (waking it if
// parked on runnable_cv), join it, free both simple-API connections and the
// user buffers, then reset stream state.
// NOTE(review): listing has gaps (e.g. the s_rec null-check around
// pa_simple_free and the runnable_cv guard are partly omitted).
8173 void RtApiPulse::closeStream( void )
8175 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Signal the callback loop in pulseaudio_callback() to exit.
8177 stream_.callbackInfo.isRunning = false;
8179 MUTEX_LOCK( &stream_.mutex );
// If stopped, the thread is blocked in callbackEvent(); wake it so it can exit.
8180 if ( stream_.state == STREAM_STOPPED ) {
8181 pah->runnable = true;
8182 pthread_cond_signal( &pah->runnable_cv );
8184 MUTEX_UNLOCK( &stream_.mutex );
8186 pthread_join( pah->thread, 0 );
// Flush playback before freeing so queued audio isn't left in the server.
8187 if ( pah->s_play ) {
8188 pa_simple_flush( pah->s_play, NULL );
8189 pa_simple_free( pah->s_play );
8192 pa_simple_free( pah->s_rec );
8194 pthread_cond_destroy( &pah->runnable_cv );
8196 stream_.apiHandle = 0;
8199 if ( stream_.userBuffer[0] ) {
8200 free( stream_.userBuffer[0] );
8201 stream_.userBuffer[0] = 0;
8203 if ( stream_.userBuffer[1] ) {
8204 free( stream_.userBuffer[1] );
8205 stream_.userBuffer[1] = 0;
8208 stream_.state = STREAM_CLOSED;
8209 stream_.mode = UNINITIALIZED;
// One iteration of the PulseAudio callback loop: wait while stopped, run the
// user callback, then blocking-write playback data / blocking-read capture
// data through the pa_simple API, converting formats as flagged.
// NOTE(review): listing has gaps (declarations of bytes/pa_error, returns,
// and some closing braces are omitted) — lines are not contiguous.
8212 void RtApiPulse::callbackEvent( void )
8214 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
// Park here until startStream() signals runnable_cv.
8216 if ( stream_.state == STREAM_STOPPED ) {
8217 MUTEX_LOCK( &stream_.mutex );
8218 while ( !pah->runnable )
8219 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8221 if ( stream_.state != STREAM_RUNNING ) {
8222 MUTEX_UNLOCK( &stream_.mutex );
8225 MUTEX_UNLOCK( &stream_.mutex );
8228 if ( stream_.state == STREAM_CLOSED ) {
8229 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8230 "this shouldn't happen!";
8231 error( RtAudioError::WARNING );
8235 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8236 double streamTime = getStreamTime();
// Pulse backend reports no xrun status to the callback (status stays 0).
8237 RtAudioStreamStatus status = 0;
8238 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8239 stream_.bufferSize, streamTime, status,
8240 stream_.callbackInfo.userData );
8242 if ( doStopStream == 2 ) {
8247 MUTEX_LOCK( &stream_.mutex );
// Choose device-side or user-side buffers depending on conversion flags.
8248 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8249 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8251 if ( stream_.state != STREAM_RUNNING )
// ---- Playback: convert (if needed) then blocking write ----
8256 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8257 if ( stream_.doConvertBuffer[OUTPUT] ) {
8258 convertBuffer( stream_.deviceBuffer,
8259 stream_.userBuffer[OUTPUT],
8260 stream_.convertInfo[OUTPUT] );
8261 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8262 formatBytes( stream_.deviceFormat[OUTPUT] );
8264 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8265 formatBytes( stream_.userFormat );
8267 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8268 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8269 pa_strerror( pa_error ) << ".";
8270 errorText_ = errorStream_.str();
8271 error( RtAudioError::WARNING );
// ---- Capture: blocking read then convert (if needed) ----
8275 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8276 if ( stream_.doConvertBuffer[INPUT] )
8277 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8278 formatBytes( stream_.deviceFormat[INPUT] );
8280 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8281 formatBytes( stream_.userFormat );
8283 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8284 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8285 pa_strerror( pa_error ) << ".";
8286 errorText_ = errorStream_.str();
8287 error( RtAudioError::WARNING );
8289 if ( stream_.doConvertBuffer[INPUT] ) {
8290 convertBuffer( stream_.userBuffer[INPUT],
8291 stream_.deviceBuffer,
8292 stream_.convertInfo[INPUT] );
8297 MUTEX_UNLOCK( &stream_.mutex );
8298 RtApi::tickStreamTime();
// Callback return value 1 requests a graceful stop.
8300 if ( doStopStream == 1 )
// Start the PulseAudio stream: validate state, mark running, and wake the
// callback thread waiting on runnable_cv in callbackEvent().
8304 void RtApiPulse::startStream( void )
8306 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8308 if ( stream_.state == STREAM_CLOSED ) {
8309 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8310 error( RtAudioError::INVALID_USE );
8313 if ( stream_.state == STREAM_RUNNING ) {
8314 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8315 error( RtAudioError::WARNING );
8319 MUTEX_LOCK( &stream_.mutex );
8321 stream_.state = STREAM_RUNNING;
8323 pah->runnable = true;
8324 pthread_cond_signal( &pah->runnable_cv );
8325 MUTEX_UNLOCK( &stream_.mutex );
// Stop the PulseAudio stream gracefully: mark stopped, then drain queued
// playback audio via pa_simple_drain() before returning.
// NOTE(review): the declaration of pa_error and some returns are in lines
// omitted from this listing.
8328 void RtApiPulse::stopStream( void )
8330 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8332 if ( stream_.state == STREAM_CLOSED ) {
8333 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8334 error( RtAudioError::INVALID_USE );
8337 if ( stream_.state == STREAM_STOPPED ) {
8338 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8339 error( RtAudioError::WARNING );
8343 stream_.state = STREAM_STOPPED;
8344 MUTEX_LOCK( &stream_.mutex );
8346 if ( pah && pah->s_play ) {
// Drain blocks until the server has played all queued samples.
8348 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8349 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8350 pa_strerror( pa_error ) << ".";
8351 errorText_ = errorStream_.str();
8352 MUTEX_UNLOCK( &stream_.mutex );
8353 error( RtAudioError::SYSTEM_ERROR );
8358 stream_.state = STREAM_STOPPED;
8359 MUTEX_UNLOCK( &stream_.mutex );
// Abort the PulseAudio stream: like stopStream() but pa_simple_flush()es
// (discards) queued playback audio instead of draining it.
8362 void RtApiPulse::abortStream( void )
8364 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8366 if ( stream_.state == STREAM_CLOSED ) {
8367 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8368 error( RtAudioError::INVALID_USE );
8371 if ( stream_.state == STREAM_STOPPED ) {
8372 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8373 error( RtAudioError::WARNING );
8377 stream_.state = STREAM_STOPPED;
8378 MUTEX_LOCK( &stream_.mutex );
8380 if ( pah && pah->s_play ) {
8382 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8383 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8384 pa_strerror( pa_error ) << ".";
8385 errorText_ = errorStream_.str();
8386 MUTEX_UNLOCK( &stream_.mutex );
8387 error( RtAudioError::SYSTEM_ERROR );
8392 stream_.state = STREAM_STOPPED;
8393 MUTEX_UNLOCK( &stream_.mutex );
// Open the single PulseAudio device for INPUT or OUTPUT:
//  - validates device==0, firstChannel==0, 1-2 channels, and a sample rate
//    from SUPPORTED_SAMPLERATES;
//  - maps the requested RtAudioFormat to a pa_sample_format_t (falling back
//    to FLOAT32 with internal conversion);
//  - allocates user/device buffers, creates the PulseAudioHandle + the
//    record/playback pa_simple connections, and spawns the callback thread
//    (optionally with SCHED_RR realtime scheduling).
// Returns true on success; the error/cleanup path (after the omitted label)
// frees buffers and the handle.
// NOTE(review): this numbered listing omits many lines (pa_sample_spec ss
// declaration, sf_found/error locals, returns, goto labels, braces) — the
// visible lines are NOT contiguous in the real file.
8396 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8397 unsigned int channels, unsigned int firstChannel,
8398 unsigned int sampleRate, RtAudioFormat format,
8399 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8401 PulseAudioHandle *pah = 0;
8402 unsigned long bufferBytes = 0;
// Only one virtual device (index 0); DUPLEX is reached via two open calls.
8405 if ( device != 0 ) return false;
8406 if ( mode != INPUT && mode != OUTPUT ) return false;
8407 if ( channels != 1 && channels != 2 ) {
8408 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8411 ss.channels = channels;
8413 if ( firstChannel != 0 ) return false;
// Accept only sample rates from the zero-terminated SUPPORTED_SAMPLERATES.
8415 bool sr_found = false;
8416 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8417 if ( sampleRate == *sr ) {
8419 stream_.sampleRate = sampleRate;
8420 ss.rate = sampleRate;
8425 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
// Find a native pa_sample_format_t for the requested format.
8430 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8431 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8432 if ( format == sf->rtaudio_format ) {
8434 stream_.userFormat = sf->rtaudio_format;
8435 stream_.deviceFormat[mode] = stream_.userFormat;
8436 ss.format = sf->pa_format;
8440 if ( !sf_found ) { // Use internal data format conversion.
8441 stream_.userFormat = format;
8442 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8443 ss.format = PA_SAMPLE_FLOAT32LE;
8446 // Set other stream parameters.
8447 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8448 else stream_.userInterleaved = true;
8449 stream_.deviceInterleaved[mode] = true;
8450 stream_.nBuffers = 1;
8451 stream_.doByteSwap[mode] = false;
8452 stream_.nUserChannels[mode] = channels;
8453 stream_.nDeviceChannels[mode] = channels + firstChannel;
8454 stream_.channelOffset[mode] = 0;
8455 std::string streamName = "RtAudio";
8457 // Set flags for buffer conversion.
8458 stream_.doConvertBuffer[mode] = false;
8459 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8460 stream_.doConvertBuffer[mode] = true;
8461 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8462 stream_.doConvertBuffer[mode] = true;
8464 // Allocate necessary internal buffers.
8465 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8466 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8467 if ( stream_.userBuffer[mode] == NULL ) {
8468 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8471 stream_.bufferSize = *bufferSize;
8473 if ( stream_.doConvertBuffer[mode] ) {
// Reuse an existing device buffer if the output pass already made one big enough.
8475 bool makeBuffer = true;
8476 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8477 if ( mode == INPUT ) {
8478 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8479 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8480 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8485 bufferBytes *= *bufferSize;
8486 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8487 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8488 if ( stream_.deviceBuffer == NULL ) {
8489 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8495 stream_.device[mode] = device;
8497 // Setup the buffer conversion information structure.
8498 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
// Create the handle on the first open; second (duplex) open reuses it.
8500 if ( !stream_.apiHandle ) {
8501 PulseAudioHandle *pah = new PulseAudioHandle;
8503 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8507 stream_.apiHandle = pah;
8508 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8509 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8513 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8516 if ( options && !options->streamName.empty() ) streamName = options->streamName;
// Record connection gets an explicit fragment size; playback uses defaults.
8519 pa_buffer_attr buffer_attr;
8520 buffer_attr.fragsize = bufferBytes;
8521 buffer_attr.maxlength = -1;
8523 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8524 if ( !pah->s_rec ) {
8525 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8530 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8531 if ( !pah->s_play ) {
8532 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
// First open sets the mode; a second open of the other direction → DUPLEX.
8540 if ( stream_.mode == UNINITIALIZED )
8541 stream_.mode = mode;
8542 else if ( stream_.mode == mode )
8545 stream_.mode = DUPLEX;
8547 if ( !stream_.callbackInfo.isRunning ) {
8548 stream_.callbackInfo.object = this;
8550 stream_.state = STREAM_STOPPED;
8551 // Set the thread attributes for joinable and realtime scheduling
8552 // priority (optional). The higher priority will only take affect
8553 // if the program is run as root or suid. Note, under Linux
8554 // processes with CAP_SYS_NICE privilege, a user can change
8555 // scheduling policy and priority (thus need not be root). See
8556 // POSIX "capabilities".
8557 pthread_attr_t attr;
8558 pthread_attr_init( &attr );
8559 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
8560 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
8561 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
8562 stream_.callbackInfo.doRealtime = true;
8563 struct sched_param param;
8564 int priority = options->priority;
8565 int min = sched_get_priority_min( SCHED_RR );
8566 int max = sched_get_priority_max( SCHED_RR );
8567 if ( priority < min ) priority = min;
8568 else if ( priority > max ) priority = max;
8569 param.sched_priority = priority;
8571 // Set the policy BEFORE the priority. Otherwise it fails.
8572 pthread_attr_setschedpolicy(&attr, SCHED_RR);
8573 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
8574 // This is definitely required. Otherwise it fails.
8575 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
8576 pthread_attr_setschedparam(&attr, ¶m);
8579 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8581 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
8584 stream_.callbackInfo.isRunning = true;
8585 int result = pthread_create( &pah->thread, &attr, pulseaudio_callback, (void *)&stream_.callbackInfo);
8586 pthread_attr_destroy(&attr);
8588 // Failed. Try instead with default attributes.
8589 result = pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo);
8591 stream_.callbackInfo.isRunning = false;
8592 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
// ---- Error/cleanup path (label line omitted from this listing) ----
8601 if ( pah && stream_.callbackInfo.isRunning ) {
8602 pthread_cond_destroy( &pah->runnable_cv );
8604 stream_.apiHandle = 0;
8607 for ( int i=0; i<2; i++ ) {
8608 if ( stream_.userBuffer[i] ) {
8609 free( stream_.userBuffer[i] );
8610 stream_.userBuffer[i] = 0;
8614 if ( stream_.deviceBuffer ) {
8615 free( stream_.deviceBuffer );
8616 stream_.deviceBuffer = 0;
8619 stream_.state = STREAM_CLOSED;
8623 //******************** End of __LINUX_PULSE__ *********************//
8626 #if defined(__LINUX_OSS__)
8629 #include <sys/ioctl.h>
8632 #include <sys/soundcard.h>
8636 static void *ossCallbackHandler(void * ptr);
8638 // A structure to hold various information related to the OSS API
8641 int id[2]; // device ids
8644 pthread_cond_t runnable;
8647 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
// Default constructor — no OSS-specific initialization required.
8650 RtApiOss :: RtApiOss()
8652 // Nothing to do here.
// Destructor: ensure any open stream is closed before the object goes away.
8655 RtApiOss :: ~RtApiOss()
8657 if ( stream_.state != STREAM_CLOSED ) closeStream();
// Query the OSS (v4+) mixer for the number of audio devices via the
// SNDCTL_SYSINFO ioctl; warns and (in omitted lines) returns 0 on failure.
8660 unsigned int RtApiOss :: getDeviceCount( void )
8662 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8663 if ( mixerfd == -1 ) {
8664 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8665 error( RtAudioError::WARNING );
8669 oss_sysinfo sysinfo;
// SNDCTL_SYSINFO only exists in OSS >= 4.0; older drivers fail here.
8670 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8672 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8673 error( RtAudioError::WARNING );
8678 return sysinfo.numaudios;
// Probe one OSS device via /dev/mixer ioctls: channel capabilities, native
// data formats (from the iformats mask), and supported sample rates (either
// the device's explicit rate list or the min/max range checked against
// RtApi's SAMPLE_RATES table). On any failure, warns and returns the
// (partially filled) info; `probed` flag handling is in omitted lines.
// NOTE(review): listing has gaps — close(mixerfd) calls, returns, and some
// braces are omitted; lines are not contiguous.
8681 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8683 RtAudio::DeviceInfo info;
8684 info.probed = false;
8686 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8687 if ( mixerfd == -1 ) {
8688 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8689 error( RtAudioError::WARNING );
8693 oss_sysinfo sysinfo;
8694 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8695 if ( result == -1 ) {
8697 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8698 error( RtAudioError::WARNING );
8702 unsigned nDevices = sysinfo.numaudios;
8703 if ( nDevices == 0 ) {
8705 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8706 error( RtAudioError::INVALID_USE );
8710 if ( device >= nDevices ) {
8712 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8713 error( RtAudioError::INVALID_USE );
8717 oss_audioinfo ainfo;
8719 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8721 if ( result == -1 ) {
8722 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8723 errorText_ = errorStream_.str();
8724 error( RtAudioError::WARNING );
// Channel capabilities from the caps bitmask.
8729 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8730 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8731 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8732 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8733 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8736 // Probe data formats ... do for input
8737 unsigned long mask = ainfo.iformats;
8738 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8739 info.nativeFormats |= RTAUDIO_SINT16;
8740 if ( mask & AFMT_S8 )
8741 info.nativeFormats |= RTAUDIO_SINT8;
8742 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8743 info.nativeFormats |= RTAUDIO_SINT32;
// AFMT_FLOAT / AFMT_S24_* are conditionally compiled (guards omitted here).
8745 if ( mask & AFMT_FLOAT )
8746 info.nativeFormats |= RTAUDIO_FLOAT32;
8748 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8749 info.nativeFormats |= RTAUDIO_SINT24;
8751 // Check that we have at least one supported format
8752 if ( info.nativeFormats == 0 ) {
8753 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8754 errorText_ = errorStream_.str();
8755 error( RtAudioError::WARNING );
8759 // Probe the supported sample rates.
8760 info.sampleRates.clear();
8761 if ( ainfo.nrates ) {
// Device supplies an explicit rate list: intersect with SAMPLE_RATES.
8762 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8763 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8764 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8765 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Prefer the highest supported rate <= 48 kHz.
8767 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8768 info.preferredSampleRate = SAMPLE_RATES[k];
8776 // Check min and max rate values;
8777 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8778 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8779 info.sampleRates.push_back( SAMPLE_RATES[k] );
8781 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8782 info.preferredSampleRate = SAMPLE_RATES[k];
8787 if ( info.sampleRates.size() == 0 ) {
8788 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8789 errorText_ = errorStream_.str();
8790 error( RtAudioError::WARNING );
8794 info.name = ainfo.name;
8801 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8802 unsigned int firstChannel, unsigned int sampleRate,
8803 RtAudioFormat format, unsigned int *bufferSize,
8804 RtAudio::StreamOptions *options )
8806 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8807 if ( mixerfd == -1 ) {
8808 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8812 oss_sysinfo sysinfo;
8813 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8814 if ( result == -1 ) {
8816 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8820 unsigned nDevices = sysinfo.numaudios;
8821 if ( nDevices == 0 ) {
8822 // This should not happen because a check is made before this function is called.
8824 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8828 if ( device >= nDevices ) {
8829 // This should not happen because a check is made before this function is called.
8831 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8835 oss_audioinfo ainfo;
8837 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8839 if ( result == -1 ) {
// NOTE(review): tail of RtApiOss::probeDeviceOpen — the signature and the opening
// of the body lie above this excerpt, and the listing has dropped interior lines
// (closing braces, "return FAILURE;" paths, the `error:` label), so only
// comments are added here; the code lines are left untouched.
8840 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8841 errorText_ = errorStream_.str();
8845 // Check if device supports input or output
8846 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8847 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8848 if ( mode == OUTPUT )
8849 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8851 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8852 errorText_ = errorStream_.str();
8857 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8858 if ( mode == OUTPUT )
8860 else { // mode == INPUT
8861 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8862 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8863 close( handle->id[0] );
8865 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8866 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8867 errorText_ = errorStream_.str();
// OSS duplex through a single device requires equal in/out channel counts.
8870 // Check that the number previously set channels is the same.
8871 if ( stream_.nUserChannels[0] != channels ) {
8872 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8873 errorText_ = errorStream_.str();
8882 // Set exclusive access if specified.
8883 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8885 // Try to open the device.
8887 fd = open( ainfo.devnode, flags, 0 );
8889 if ( errno == EBUSY )
8890 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8892 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8893 errorText_ = errorStream_.str();
8897 // For duplex operation, specifically set this mode (this doesn't seem to work).
// NOTE(review): `flags | O_RDWR` is a bitwise OR and is therefore always
// non-zero — this condition is always true. `flags & O_RDWR` was presumably
// intended; confirm against the canonical RtAudio sources.
8899 if ( flags | O_RDWR ) {
8900 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8901 if ( result == -1) {
8902 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8903 errorText_ = errorStream_.str();
8909 // Check the device channel support.
8910 stream_.nUserChannels[mode] = channels;
8911 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8913 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8914 errorText_ = errorStream_.str();
8918 // Set the number of channels.
8919 int deviceChannels = channels + firstChannel;
8920 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8921 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8923 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8924 errorText_ = errorStream_.str();
8927 stream_.nDeviceChannels[mode] = deviceChannels;
8929 // Get the data format mask
8931 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8932 if ( result == -1 ) {
8934 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8935 errorText_ = errorStream_.str();
8939 // Determine how to set the device format.
8940 stream_.userFormat = format;
8941 int deviceFormat = -1;
8942 stream_.doByteSwap[mode] = false;
// First try to match the user-requested sample format exactly; the _OE
// (opposite-endian) AFMT variants are accepted too, with doByteSwap set so
// samples are swapped in software.
8943 if ( format == RTAUDIO_SINT8 ) {
8944 if ( mask & AFMT_S8 ) {
8945 deviceFormat = AFMT_S8;
8946 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8949 else if ( format == RTAUDIO_SINT16 ) {
8950 if ( mask & AFMT_S16_NE ) {
8951 deviceFormat = AFMT_S16_NE;
8952 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8954 else if ( mask & AFMT_S16_OE ) {
8955 deviceFormat = AFMT_S16_OE;
8956 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8957 stream_.doByteSwap[mode] = true;
8960 else if ( format == RTAUDIO_SINT24 ) {
8961 if ( mask & AFMT_S24_NE ) {
8962 deviceFormat = AFMT_S24_NE;
8963 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8965 else if ( mask & AFMT_S24_OE ) {
8966 deviceFormat = AFMT_S24_OE;
8967 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8968 stream_.doByteSwap[mode] = true;
8971 else if ( format == RTAUDIO_SINT32 ) {
8972 if ( mask & AFMT_S32_NE ) {
8973 deviceFormat = AFMT_S32_NE;
8974 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8976 else if ( mask & AFMT_S32_OE ) {
8977 deviceFormat = AFMT_S32_OE;
8978 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8979 stream_.doByteSwap[mode] = true;
8983 if ( deviceFormat == -1 ) {
8984 // The user requested format is not natively supported by the device.
// Fall back in preference order: S16_NE, S32_NE, S24_NE, then the
// byte-swapped variants, and finally S8; conversion to the user format
// happens later in convertBuffer().
8985 if ( mask & AFMT_S16_NE ) {
8986 deviceFormat = AFMT_S16_NE;
8987 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8989 else if ( mask & AFMT_S32_NE ) {
8990 deviceFormat = AFMT_S32_NE;
8991 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8993 else if ( mask & AFMT_S24_NE ) {
8994 deviceFormat = AFMT_S24_NE;
8995 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8997 else if ( mask & AFMT_S16_OE ) {
8998 deviceFormat = AFMT_S16_OE;
8999 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
9000 stream_.doByteSwap[mode] = true;
9002 else if ( mask & AFMT_S32_OE ) {
9003 deviceFormat = AFMT_S32_OE;
9004 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
9005 stream_.doByteSwap[mode] = true;
9007 else if ( mask & AFMT_S24_OE ) {
9008 deviceFormat = AFMT_S24_OE;
9009 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
9010 stream_.doByteSwap[mode] = true;
9012 else if ( mask & AFMT_S8) {
9013 deviceFormat = AFMT_S8;
9014 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
9018 if ( stream_.deviceFormat[mode] == 0 ) {
9019 // This really shouldn't happen ...
9021 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
9022 errorText_ = errorStream_.str();
9026 // Set the data format.
9027 int temp = deviceFormat;
9028 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
// SNDCTL_DSP_SETFMT rewrites its argument with the format actually set;
// a mismatch with the request is treated as failure.
9029 if ( result == -1 || deviceFormat != temp ) {
9031 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
9032 errorText_ = errorStream_.str();
9036 // Attempt to set the buffer size. According to OSS, the minimum
9037 // number of buffers is two. The supposed minimum buffer size is 16
9038 // bytes, so that will be our lower bound. The argument to this
9039 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
9040 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
9041 // We'll check the actual value used near the end of the setup
9043 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
9044 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
9046 if ( options ) buffers = options->numberOfBuffers;
9047 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
9048 if ( buffers < 2 ) buffers = 3;
// log10(x)/log10(2) == log2(ossBufferBytes): the SSSS exponent field above.
9049 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
9050 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
9051 if ( result == -1 ) {
9053 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
9054 errorText_ = errorStream_.str();
9057 stream_.nBuffers = buffers;
9059 // Save buffer size (in sample frames).
9060 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
9061 stream_.bufferSize = *bufferSize;
9063 // Set the sample rate.
9064 int srate = sampleRate;
9065 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
9066 if ( result == -1 ) {
9068 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
9069 errorText_ = errorStream_.str();
9073 // Verify the sample rate setup worked.
// The driver may round the rate; accept anything within 100 Hz of the request.
9074 if ( abs( srate - (int)sampleRate ) > 100 ) {
9076 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
9077 errorText_ = errorStream_.str();
9080 stream_.sampleRate = sampleRate;
9082 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
9083 // We're doing duplex setup here.
9084 stream_.deviceFormat[0] = stream_.deviceFormat[1];
9085 stream_.nDeviceChannels[0] = deviceChannels;
9088 // Set interleaving parameters.
9089 stream_.userInterleaved = true;
9090 stream_.deviceInterleaved[mode] = true;
9091 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
9092 stream_.userInterleaved = false;
9094 // Set flags for buffer conversion
9095 stream_.doConvertBuffer[mode] = false;
9096 if ( stream_.userFormat != stream_.deviceFormat[mode] )
9097 stream_.doConvertBuffer[mode] = true;
9098 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
9099 stream_.doConvertBuffer[mode] = true;
9100 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
9101 stream_.nUserChannels[mode] > 1 )
9102 stream_.doConvertBuffer[mode] = true;
9104 // Allocate the stream handles if necessary and then save.
9105 if ( stream_.apiHandle == 0 ) {
9107 handle = new OssHandle;
9109 catch ( std::bad_alloc& ) {
9110 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
9114 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
9115 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
9119 stream_.apiHandle = (void *) handle;
9122 handle = (OssHandle *) stream_.apiHandle;
9124 handle->id[mode] = fd;
9126 // Allocate necessary internal buffers.
9127 unsigned long bufferBytes;
9128 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9129 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9130 if ( stream_.userBuffer[mode] == NULL ) {
9131 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9135 if ( stream_.doConvertBuffer[mode] ) {
9137 bool makeBuffer = true;
9138 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9139 if ( mode == INPUT ) {
// Reuse the existing (output) device buffer when it is already big enough.
9140 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9141 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9142 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9147 bufferBytes *= *bufferSize;
9148 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9149 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9150 if ( stream_.deviceBuffer == NULL ) {
9151 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9157 stream_.device[mode] = device;
9158 stream_.state = STREAM_STOPPED;
9160 // Setup the buffer conversion information structure.
9161 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9163 // Setup thread if necessary.
9164 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9165 // We had already set up an output stream.
9166 stream_.mode = DUPLEX;
9167 if ( stream_.device[0] == device ) handle->id[0] = fd;
9170 stream_.mode = mode;
9172 // Setup callback thread.
9173 stream_.callbackInfo.object = (void *) this;
9175 // Set the thread attributes for joinable and realtime scheduling
9176 // priority. The higher priority will only take affect if the
9177 // program is run as root or suid.
9178 pthread_attr_t attr;
9179 pthread_attr_init( &attr );
9180 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9181 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9182 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9183 stream_.callbackInfo.doRealtime = true;
9184 struct sched_param param;
9185 int priority = options->priority;
9186 int min = sched_get_priority_min( SCHED_RR );
9187 int max = sched_get_priority_max( SCHED_RR );
// Clamp the user-supplied priority into the valid SCHED_RR range.
9188 if ( priority < min ) priority = min;
9189 else if ( priority > max ) priority = max;
9190 param.sched_priority = priority;
9192 // Set the policy BEFORE the priority. Otherwise it fails.
9193 pthread_attr_setschedpolicy(&attr, SCHED_RR);
9194 pthread_attr_setscope (&attr, PTHREAD_SCOPE_SYSTEM);
9195 // This is definitely required. Otherwise it fails.
9196 pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
// NOTE(review): "¶m" below is a character-encoding corruption of
// "&param" — repair the encoding; do not compile as-is.
9197 pthread_attr_setschedparam(&attr, ¶m);
9200 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9202 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9205 stream_.callbackInfo.isRunning = true;
9206 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9207 pthread_attr_destroy( &attr );
9209 // Failed. Try instead with default attributes.
9210 result = pthread_create( &stream_.callbackInfo.thread, NULL, ossCallbackHandler, &stream_.callbackInfo );
9212 stream_.callbackInfo.isRunning = false;
9213 errorText_ = "RtApiOss::error creating callback thread!";
// Error unwind: release the condition variable, device descriptors and
// buffers allocated above (the `error:` label line itself is among the
// lines missing from this listing).
9223 pthread_cond_destroy( &handle->runnable );
9224 if ( handle->id[0] ) close( handle->id[0] );
9225 if ( handle->id[1] ) close( handle->id[1] );
9227 stream_.apiHandle = 0;
9230 for ( int i=0; i<2; i++ ) {
9231 if ( stream_.userBuffer[i] ) {
9232 free( stream_.userBuffer[i] );
9233 stream_.userBuffer[i] = 0;
9237 if ( stream_.deviceBuffer ) {
9238 free( stream_.deviceBuffer );
9239 stream_.deviceBuffer = 0;
9242 stream_.state = STREAM_CLOSED;
// Close the stream: wake and join the callback thread, halt any running
// i/o, then release the OSS handle, device descriptors and all buffers.
// (Several brace/return lines are missing from this listing; code left as-is.)
9246 void RtApiOss :: closeStream()
9248 if ( stream_.state == STREAM_CLOSED ) {
9249 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9250 error( RtAudioError::WARNING );
// Stop the callback loop, then signal the condition variable in case the
// thread is blocked in callbackEvent() waiting on a stopped stream.
9254 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9255 stream_.callbackInfo.isRunning = false;
9256 MUTEX_LOCK( &stream_.mutex );
9257 if ( stream_.state == STREAM_STOPPED )
9258 pthread_cond_signal( &handle->runnable );
9259 MUTEX_UNLOCK( &stream_.mutex );
9260 pthread_join( stream_.callbackInfo.thread, NULL );
9262 if ( stream_.state == STREAM_RUNNING ) {
9263 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9264 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
// (the intervening `else` line is missing from this listing)
9266 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9267 stream_.state = STREAM_STOPPED;
// Free the per-API handle and close the device descriptor(s).
9271 pthread_cond_destroy( &handle->runnable );
9272 if ( handle->id[0] ) close( handle->id[0] );
9273 if ( handle->id[1] ) close( handle->id[1] );
9275 stream_.apiHandle = 0;
9278 for ( int i=0; i<2; i++ ) {
9279 if ( stream_.userBuffer[i] ) {
9280 free( stream_.userBuffer[i] );
9281 stream_.userBuffer[i] = 0;
9285 if ( stream_.deviceBuffer ) {
9286 free( stream_.deviceBuffer );
9287 stream_.deviceBuffer = 0;
9290 stream_.mode = UNINITIALIZED;
9291 stream_.state = STREAM_CLOSED;
// Mark the stream running and wake the callback thread.  No device ioctl is
// needed: OSS starts i/o automatically once samples are written/read.
9294 void RtApiOss :: startStream()
9297 if ( stream_.state == STREAM_RUNNING ) {
9298 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9299 error( RtAudioError::WARNING );
9303 MUTEX_LOCK( &stream_.mutex );
9305 stream_.state = STREAM_RUNNING;
9307 // No need to do anything else here ... OSS automatically starts
9308 // when fed samples.
9310 MUTEX_UNLOCK( &stream_.mutex );
// Wake callbackEvent(), which waits on this condition while stopped.
9312 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9313 pthread_cond_signal( &handle->runnable );
// Stop the stream gracefully: drain the output by writing zero buffers so
// pending user data is played out, halt the device(s), then mark the stream
// stopped.  Compare abortStream(), which skips the drain.
9316 void RtApiOss :: stopStream()
9319 if ( stream_.state == STREAM_STOPPED ) {
9320 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9321 error( RtAudioError::WARNING );
9325 MUTEX_LOCK( &stream_.mutex );
9327 // The state might change while waiting on a mutex.
9328 if ( stream_.state == STREAM_STOPPED ) {
9329 MUTEX_UNLOCK( &stream_.mutex );
9334 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9335 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9337 // Flush the output with zeros a few times.
// Pick whichever buffer actually feeds the device (converted or user).
9340 RtAudioFormat format;
9342 if ( stream_.doConvertBuffer[0] ) {
9343 buffer = stream_.deviceBuffer;
9344 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9345 format = stream_.deviceFormat[0];
9348 buffer = stream_.userBuffer[0];
9349 samples = stream_.bufferSize * stream_.nUserChannels[0];
9350 format = stream_.userFormat;
// Write one more zero buffer than the fragment count to push out
// everything still queued in the driver.
9353 memset( buffer, 0, samples * formatBytes(format) );
9354 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9355 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9356 if ( result == -1 ) {
9357 errorText_ = "RtApiOss::stopStream: audio write error.";
9358 error( RtAudioError::WARNING );
9362 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9363 if ( result == -1 ) {
9364 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9365 errorText_ = errorStream_.str();
9368 handle->triggered = false;
// Halt the input side too, unless duplex i/o shares one descriptor
// (in which case the halt above already covered it).
9371 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9372 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9373 if ( result == -1 ) {
9374 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9375 errorText_ = errorStream_.str();
9381 stream_.state = STREAM_STOPPED;
9382 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only after the mutex has been released.
9384 if ( result != -1 ) return;
9385 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream immediately — no draining of pending output — by halting
// the device(s) and marking the stream stopped.
9388 void RtApiOss :: abortStream()
9391 if ( stream_.state == STREAM_STOPPED ) {
9392 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9393 error( RtAudioError::WARNING );
9397 MUTEX_LOCK( &stream_.mutex );
9399 // The state might change while waiting on a mutex.
9400 if ( stream_.state == STREAM_STOPPED ) {
9401 MUTEX_UNLOCK( &stream_.mutex );
9406 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9407 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9408 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9409 if ( result == -1 ) {
9410 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9411 errorText_ = errorStream_.str();
9414 handle->triggered = false;
// Halt input separately only when it uses its own descriptor.
9417 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9418 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9419 if ( result == -1 ) {
9420 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9421 errorText_ = errorStream_.str();
9427 stream_.state = STREAM_STOPPED;
9428 MUTEX_UNLOCK( &stream_.mutex );
// Report a system error only after the mutex has been released.
9430 if ( result != -1 ) return;
9431 error( RtAudioError::SYSTEM_ERROR );
// One iteration of the audio callback loop: wait while the stream is
// stopped, invoke the user callback, convert/byte-swap buffers as
// configured, and perform the blocking write()/read() on the OSS
// descriptor(s).  Called repeatedly from ossCallbackHandler().
9434 void RtApiOss :: callbackEvent()
9436 OssHandle *handle = (OssHandle *) stream_.apiHandle;
// Block on the condition variable until startStream()/closeStream()
// signals; re-check the state because closeStream() also signals it.
9437 if ( stream_.state == STREAM_STOPPED ) {
9438 MUTEX_LOCK( &stream_.mutex );
9439 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9440 if ( stream_.state != STREAM_RUNNING ) {
9441 MUTEX_UNLOCK( &stream_.mutex );
9444 MUTEX_UNLOCK( &stream_.mutex );
9447 if ( stream_.state == STREAM_CLOSED ) {
9448 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9449 error( RtAudioError::WARNING );
9453 // Invoke user callback to get fresh output data.
9454 int doStopStream = 0;
9455 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9456 double streamTime = getStreamTime();
9457 RtAudioStreamStatus status = 0;
// Report and clear any under/overflow flagged by the previous i/o pass.
9458 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9459 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9460 handle->xrun[0] = false;
9462 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9463 status |= RTAUDIO_INPUT_OVERFLOW;
9464 handle->xrun[1] = false;
9466 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9467 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
// A callback return of 2 requests an immediate abort; a return of 1
// (checked at the end of this function) requests a drained stop.
9468 if ( doStopStream == 2 ) {
9469 this->abortStream();
9473 MUTEX_LOCK( &stream_.mutex );
9475 // The state might change while waiting on a mutex.
9476 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9481 RtAudioFormat format;
9483 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9485 // Setup parameters and do buffer conversion if necessary.
9486 if ( stream_.doConvertBuffer[0] ) {
9487 buffer = stream_.deviceBuffer;
9488 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9489 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9490 format = stream_.deviceFormat[0];
9493 buffer = stream_.userBuffer[0];
9494 samples = stream_.bufferSize * stream_.nUserChannels[0];
9495 format = stream_.userFormat;
9498 // Do byte swapping if necessary.
9499 if ( stream_.doByteSwap[0] )
9500 byteSwapBuffer( buffer, samples, format );
// First duplex pass: prime the output, then enable the input and output
// triggers together so both directions start in sync.
9502 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9504 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9505 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9506 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9507 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9508 handle->triggered = true;
9511 // Write samples to device.
9512 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9514 if ( result == -1 ) {
9515 // We'll assume this is an underrun, though there isn't a
9516 // specific means for determining that.
9517 handle->xrun[0] = true;
9518 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9519 error( RtAudioError::WARNING );
9520 // Continue on to input section.
9524 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9526 // Setup parameters.
9527 if ( stream_.doConvertBuffer[1] ) {
9528 buffer = stream_.deviceBuffer;
9529 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9530 format = stream_.deviceFormat[1];
9533 buffer = stream_.userBuffer[1];
9534 samples = stream_.bufferSize * stream_.nUserChannels[1];
9535 format = stream_.userFormat;
9538 // Read samples from device.
9539 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9541 if ( result == -1 ) {
9542 // We'll assume this is an overrun, though there isn't a
9543 // specific means for determining that.
9544 handle->xrun[1] = true;
9545 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9546 error( RtAudioError::WARNING );
9550 // Do byte swapping if necessary.
9551 if ( stream_.doByteSwap[1] )
9552 byteSwapBuffer( buffer, samples, format );
9554 // Do buffer conversion if necessary.
9555 if ( stream_.doConvertBuffer[1] )
9556 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
// (the `unlock:` label targeted by the goto above is among the lines
// missing from this listing)
9560 MUTEX_UNLOCK( &stream_.mutex );
9562 RtApi::tickStreamTime();
9563 if ( doStopStream == 1 ) this->stopStream();
// Callback thread entry point: loops calling RtApiOss::callbackEvent()
// until closeStream() clears CallbackInfo::isRunning.
9566 static void *ossCallbackHandler( void *ptr )
9568 CallbackInfo *info = (CallbackInfo *) ptr;
9569 RtApiOss *object = (RtApiOss *) info->object;
9570 bool *isRunning = &info->isRunning;
9572 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
// Diagnostic only: report whether the realtime policy actually took effect.
9573 if (info->doRealtime) {
9574 std::cerr << "RtAudio oss: " <<
9575 (sched_getscheduler(0) == SCHED_RR ? "" : "_NOT_ ") <<
9576 "running realtime scheduling" << std::endl;
9580 while ( *isRunning == true ) {
9581 pthread_testcancel();
9582 object->callbackEvent();
9585 pthread_exit( NULL );
9588 //******************** End of __LINUX_OSS__ *********************//
9592 // *************************************************** //
9594 // Protected common (OS-independent) RtAudio methods.
9596 // *************************************************** //
9598 // This method can be modified to control the behavior of error
9599 // message printing.
// Central error reporter: clears the message stream, then either forwards
// the message to a user-supplied error callback or (no callback installed)
// prints warnings to stderr and throws RtAudioError for real errors.
9600 void RtApi :: error( RtAudioError::Type type )
9602 errorStream_.str(""); // clear the ostringstream
9604 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9605 if ( errorCallback ) {
9606 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9608 if ( firstErrorOccurred_ )
9611 firstErrorOccurred_ = true;
// Copy the text now: the stop/abort path below may overwrite errorText_.
9612 const std::string errorMessage = errorText_;
9614 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9615 stream_.callbackInfo.isRunning = false; // exit from the thread
9619 errorCallback( type, errorMessage );
9620 firstErrorOccurred_ = false;
// No callback installed: warnings go to stderr, real errors throw.
9624 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9625 std::cerr << '\n' << errorText_ << "\n\n";
9626 else if ( type != RtAudioError::WARNING )
9627 throw( RtAudioError( errorText_, type ) );
// Raise an INVALID_USE error if no stream is currently open.
9630 void RtApi :: verifyStream()
9632 if ( stream_.state == STREAM_CLOSED ) {
9633 errorText_ = "RtApi:: a stream is not open!";
9634 error( RtAudioError::INVALID_USE );
// Reset every field of the stream structure to its default/closed value;
// run before (re)opening a stream.
9638 void RtApi :: clearStreamInfo()
9640 stream_.mode = UNINITIALIZED;
9641 stream_.state = STREAM_CLOSED;
9642 stream_.sampleRate = 0;
9643 stream_.bufferSize = 0;
9644 stream_.nBuffers = 0;
9645 stream_.userFormat = 0;
9646 stream_.userInterleaved = true;
9647 stream_.streamTime = 0.0;
9648 stream_.apiHandle = 0;
9649 stream_.deviceBuffer = 0;
9650 stream_.callbackInfo.callback = 0;
9651 stream_.callbackInfo.userData = 0;
9652 stream_.callbackInfo.isRunning = false;
9653 stream_.callbackInfo.errorCallback = 0;
// Per-direction fields: index 0 = playback, 1 = capture.
9654 for ( int i=0; i<2; i++ ) {
9655 stream_.device[i] = 11111; // sentinel meaning "no device selected"
9656 stream_.doConvertBuffer[i] = false;
9657 stream_.deviceInterleaved[i] = true;
9658 stream_.doByteSwap[i] = false;
9659 stream_.nUserChannels[i] = 0;
9660 stream_.nDeviceChannels[i] = 0;
9661 stream_.channelOffset[i] = 0;
9662 stream_.deviceFormat[i] = 0;
9663 stream_.latency[i] = 0;
9664 stream_.userBuffer[i] = 0;
9665 stream_.convertInfo[i].channels = 0;
9666 stream_.convertInfo[i].inJump = 0;
9667 stream_.convertInfo[i].outJump = 0;
9668 stream_.convertInfo[i].inFormat = 0;
9669 stream_.convertInfo[i].outFormat = 0;
9670 stream_.convertInfo[i].inOffset.clear();
9671 stream_.convertInfo[i].outOffset.clear();
// Return the size in bytes of one sample of the given format, or 0 (with a
// warning) for an unrecognized format.  (The `return N;` lines themselves
// are among the lines dropped from this listing.)
9675 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9677 if ( format == RTAUDIO_SINT16 )
9679 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9681 else if ( format == RTAUDIO_FLOAT64 )
9683 else if ( format == RTAUDIO_SINT24 )
9685 else if ( format == RTAUDIO_SINT8 )
9688 errorText_ = "RtApi::formatBytes: undefined format.";
9689 error( RtAudioError::WARNING );
// Fill in stream_.convertInfo[mode] — sample jumps, formats, channel count
// and interleave/deinterleave offsets — later consumed by convertBuffer().
9694 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9696 if ( mode == INPUT ) { // convert device to user buffer
9697 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9698 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9699 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9700 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9702 else { // convert user to device buffer
9703 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9704 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9705 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9706 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
// Convert only as many channels as both sides actually have.
9709 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9710 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9712 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9714 // Set up the interleave/deinterleave offsets.
9715 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9716 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9717 ( mode == INPUT && stream_.userInterleaved ) ) {
// Non-interleaved source -> interleaved destination: each source channel
// is a contiguous bufferSize-long plane, hence the k*bufferSize offsets.
9718 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9719 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9720 stream_.convertInfo[mode].outOffset.push_back( k );
9721 stream_.convertInfo[mode].inJump = 1;
// Interleaved source -> non-interleaved destination (mirror of the above).
9725 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9726 stream_.convertInfo[mode].inOffset.push_back( k );
9727 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9728 stream_.convertInfo[mode].outJump = 1;
9732 else { // no (de)interleaving
9733 if ( stream_.userInterleaved ) {
9734 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9735 stream_.convertInfo[mode].inOffset.push_back( k );
9736 stream_.convertInfo[mode].outOffset.push_back( k );
9740 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9741 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9742 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9743 stream_.convertInfo[mode].inJump = 1;
9744 stream_.convertInfo[mode].outJump = 1;
9749 // Add channel offset.
9750 if ( firstChannel > 0 ) {
9751 if ( stream_.deviceInterleaved[mode] ) {
9752 if ( mode == OUTPUT ) {
9753 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9754 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9757 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9758 stream_.convertInfo[mode].inOffset[k] += firstChannel;
// Non-interleaved device side: the offset skips whole channel planes.
9762 if ( mode == OUTPUT ) {
9763 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9764 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9767 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9768 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
// Convert between user and device sample representations — format, channel
// count and interleaving — one frame at a time, driven by the ConvertInfo
// built in setConvertInfo().  The code is organized as one section per
// OUTPUT format, with one branch per INPUT format inside each section.
// (This excerpt cuts off inside the SINT24 output section; the function
// continues beyond it.)
9774 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9776 // This function does format conversion, input/output channel compensation, and
9777 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9778 // the lower three bytes of a 32-bit integer.
9780 // Clear our device buffer when in/out duplex device channels are different
9781 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9782 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9783 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
// ---- Output format: FLOAT64.  Integer inputs are recentered by +0.5 and
// scaled so the asymmetric integer range maps onto [-1, 1).
9786 if (info.outFormat == RTAUDIO_FLOAT64) {
9788 Float64 *out = (Float64 *)outBuffer;
9790 if (info.inFormat == RTAUDIO_SINT8) {
9791 signed char *in = (signed char *)inBuffer;
9792 scale = 1.0 / 127.5;
9793 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9794 for (j=0; j<info.channels; j++) {
9795 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9796 out[info.outOffset[j]] += 0.5;
9797 out[info.outOffset[j]] *= scale;
9800 out += info.outJump;
9803 else if (info.inFormat == RTAUDIO_SINT16) {
9804 Int16 *in = (Int16 *)inBuffer;
9805 scale = 1.0 / 32767.5;
9806 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9807 for (j=0; j<info.channels; j++) {
9808 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9809 out[info.outOffset[j]] += 0.5;
9810 out[info.outOffset[j]] *= scale;
9813 out += info.outJump;
9816 else if (info.inFormat == RTAUDIO_SINT24) {
9817 Int24 *in = (Int24 *)inBuffer;
9818 scale = 1.0 / 8388607.5;
9819 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9820 for (j=0; j<info.channels; j++) {
9821 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9822 out[info.outOffset[j]] += 0.5;
9823 out[info.outOffset[j]] *= scale;
9826 out += info.outJump;
9829 else if (info.inFormat == RTAUDIO_SINT32) {
9830 Int32 *in = (Int32 *)inBuffer;
9831 scale = 1.0 / 2147483647.5;
9832 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9833 for (j=0; j<info.channels; j++) {
9834 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9835 out[info.outOffset[j]] += 0.5;
9836 out[info.outOffset[j]] *= scale;
9839 out += info.outJump;
9842 else if (info.inFormat == RTAUDIO_FLOAT32) {
9843 Float32 *in = (Float32 *)inBuffer;
9844 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9845 for (j=0; j<info.channels; j++) {
9846 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9849 out += info.outJump;
9852 else if (info.inFormat == RTAUDIO_FLOAT64) {
9853 // Channel compensation and/or (de)interleaving only.
9854 Float64 *in = (Float64 *)inBuffer;
9855 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9856 for (j=0; j<info.channels; j++) {
9857 out[info.outOffset[j]] = in[info.inOffset[j]];
9860 out += info.outJump;
// ---- Output format: FLOAT32 (same recenter-and-scale scheme as FLOAT64).
9864 else if (info.outFormat == RTAUDIO_FLOAT32) {
9866 Float32 *out = (Float32 *)outBuffer;
9868 if (info.inFormat == RTAUDIO_SINT8) {
9869 signed char *in = (signed char *)inBuffer;
9870 scale = (Float32) ( 1.0 / 127.5 );
9871 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9872 for (j=0; j<info.channels; j++) {
9873 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9874 out[info.outOffset[j]] += 0.5;
9875 out[info.outOffset[j]] *= scale;
9878 out += info.outJump;
9881 else if (info.inFormat == RTAUDIO_SINT16) {
9882 Int16 *in = (Int16 *)inBuffer;
9883 scale = (Float32) ( 1.0 / 32767.5 );
9884 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9885 for (j=0; j<info.channels; j++) {
9886 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9887 out[info.outOffset[j]] += 0.5;
9888 out[info.outOffset[j]] *= scale;
9891 out += info.outJump;
9894 else if (info.inFormat == RTAUDIO_SINT24) {
9895 Int24 *in = (Int24 *)inBuffer;
9896 scale = (Float32) ( 1.0 / 8388607.5 );
9897 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9898 for (j=0; j<info.channels; j++) {
9899 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9900 out[info.outOffset[j]] += 0.5;
9901 out[info.outOffset[j]] *= scale;
9904 out += info.outJump;
9907 else if (info.inFormat == RTAUDIO_SINT32) {
9908 Int32 *in = (Int32 *)inBuffer;
9909 scale = (Float32) ( 1.0 / 2147483647.5 );
9910 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9911 for (j=0; j<info.channels; j++) {
9912 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9913 out[info.outOffset[j]] += 0.5;
9914 out[info.outOffset[j]] *= scale;
9917 out += info.outJump;
9920 else if (info.inFormat == RTAUDIO_FLOAT32) {
9921 // Channel compensation and/or (de)interleaving only.
9922 Float32 *in = (Float32 *)inBuffer;
9923 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9924 for (j=0; j<info.channels; j++) {
9925 out[info.outOffset[j]] = in[info.inOffset[j]];
9928 out += info.outJump;
9931 else if (info.inFormat == RTAUDIO_FLOAT64) {
9932 Float64 *in = (Float64 *)inBuffer;
9933 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9934 for (j=0; j<info.channels; j++) {
9935 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9938 out += info.outJump;
// ---- Output format: SINT32.  Narrower integers are shifted up into the
// high bits; floats are scaled to the full 32-bit range.
9942 else if (info.outFormat == RTAUDIO_SINT32) {
9943 Int32 *out = (Int32 *)outBuffer;
9944 if (info.inFormat == RTAUDIO_SINT8) {
9945 signed char *in = (signed char *)inBuffer;
9946 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9947 for (j=0; j<info.channels; j++) {
9948 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9949 out[info.outOffset[j]] <<= 24;
9952 out += info.outJump;
9955 else if (info.inFormat == RTAUDIO_SINT16) {
9956 Int16 *in = (Int16 *)inBuffer;
9957 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9958 for (j=0; j<info.channels; j++) {
9959 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9960 out[info.outOffset[j]] <<= 16;
9963 out += info.outJump;
9966 else if (info.inFormat == RTAUDIO_SINT24) {
9967 Int24 *in = (Int24 *)inBuffer;
9968 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9969 for (j=0; j<info.channels; j++) {
9970 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9971 out[info.outOffset[j]] <<= 8;
9974 out += info.outJump;
9977 else if (info.inFormat == RTAUDIO_SINT32) {
9978 // Channel compensation and/or (de)interleaving only.
9979 Int32 *in = (Int32 *)inBuffer;
9980 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9981 for (j=0; j<info.channels; j++) {
9982 out[info.outOffset[j]] = in[info.inOffset[j]];
9985 out += info.outJump;
9988 else if (info.inFormat == RTAUDIO_FLOAT32) {
9989 Float32 *in = (Float32 *)inBuffer;
9990 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9991 for (j=0; j<info.channels; j++) {
9992 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9995 out += info.outJump;
9998 else if (info.inFormat == RTAUDIO_FLOAT64) {
9999 Float64 *in = (Float64 *)inBuffer;
10000 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10001 for (j=0; j<info.channels; j++) {
10002 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
10005 out += info.outJump;
// ---- Output format: SINT24 (the excerpt ends inside this section).
10009 else if (info.outFormat == RTAUDIO_SINT24) {
10010 Int24 *out = (Int24 *)outBuffer;
10011 if (info.inFormat == RTAUDIO_SINT8) {
10012 signed char *in = (signed char *)inBuffer;
10013 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10014 for (j=0; j<info.channels; j++) {
10015 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
10016 //out[info.outOffset[j]] <<= 16;
10019 out += info.outJump;
10022 else if (info.inFormat == RTAUDIO_SINT16) {
10023 Int16 *in = (Int16 *)inBuffer;
10024 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10025 for (j=0; j<info.channels; j++) {
10026 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
10027 //out[info.outOffset[j]] <<= 8;
10030 out += info.outJump;
10033 else if (info.inFormat == RTAUDIO_SINT24) {
10034 // Channel compensation and/or (de)interleaving only.
10035 Int24 *in = (Int24 *)inBuffer;
10036 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10037 for (j=0; j<info.channels; j++) {
10038 out[info.outOffset[j]] = in[info.inOffset[j]];
10041 out += info.outJump;
10044 else if (info.inFormat == RTAUDIO_SINT32) {
10045 Int32 *in = (Int32 *)inBuffer;
10046 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10047 for (j=0; j<info.channels; j++) {
10048 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
10049 //out[info.outOffset[j]] >>= 8;
10052 out += info.outJump;
10055 else if (info.inFormat == RTAUDIO_FLOAT32) {
10056 Float32 *in = (Float32 *)inBuffer;
10057 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10058 for (j=0; j<info.channels; j++) {
10059 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10062 out += info.outJump;
10065 else if (info.inFormat == RTAUDIO_FLOAT64) {
10066 Float64 *in = (Float64 *)inBuffer;
10067 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10068 for (j=0; j<info.channels; j++) {
10069 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
10072 out += info.outJump;
10076 else if (info.outFormat == RTAUDIO_SINT16) {
10077 Int16 *out = (Int16 *)outBuffer;
10078 if (info.inFormat == RTAUDIO_SINT8) {
10079 signed char *in = (signed char *)inBuffer;
10080 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10081 for (j=0; j<info.channels; j++) {
10082 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
10083 out[info.outOffset[j]] <<= 8;
10086 out += info.outJump;
10089 else if (info.inFormat == RTAUDIO_SINT16) {
10090 // Channel compensation and/or (de)interleaving only.
10091 Int16 *in = (Int16 *)inBuffer;
10092 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10093 for (j=0; j<info.channels; j++) {
10094 out[info.outOffset[j]] = in[info.inOffset[j]];
10097 out += info.outJump;
10100 else if (info.inFormat == RTAUDIO_SINT24) {
10101 Int24 *in = (Int24 *)inBuffer;
10102 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10103 for (j=0; j<info.channels; j++) {
10104 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
10107 out += info.outJump;
10110 else if (info.inFormat == RTAUDIO_SINT32) {
10111 Int32 *in = (Int32 *)inBuffer;
10112 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10113 for (j=0; j<info.channels; j++) {
10114 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
10117 out += info.outJump;
10120 else if (info.inFormat == RTAUDIO_FLOAT32) {
10121 Float32 *in = (Float32 *)inBuffer;
10122 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10123 for (j=0; j<info.channels; j++) {
10124 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10127 out += info.outJump;
10130 else if (info.inFormat == RTAUDIO_FLOAT64) {
10131 Float64 *in = (Float64 *)inBuffer;
10132 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10133 for (j=0; j<info.channels; j++) {
10134 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
10137 out += info.outJump;
10141 else if (info.outFormat == RTAUDIO_SINT8) {
10142 signed char *out = (signed char *)outBuffer;
10143 if (info.inFormat == RTAUDIO_SINT8) {
10144 // Channel compensation and/or (de)interleaving only.
10145 signed char *in = (signed char *)inBuffer;
10146 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10147 for (j=0; j<info.channels; j++) {
10148 out[info.outOffset[j]] = in[info.inOffset[j]];
10151 out += info.outJump;
10154 if (info.inFormat == RTAUDIO_SINT16) {
10155 Int16 *in = (Int16 *)inBuffer;
10156 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10157 for (j=0; j<info.channels; j++) {
10158 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10161 out += info.outJump;
10164 else if (info.inFormat == RTAUDIO_SINT24) {
10165 Int24 *in = (Int24 *)inBuffer;
10166 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10167 for (j=0; j<info.channels; j++) {
10168 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10171 out += info.outJump;
10174 else if (info.inFormat == RTAUDIO_SINT32) {
10175 Int32 *in = (Int32 *)inBuffer;
10176 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10177 for (j=0; j<info.channels; j++) {
10178 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10181 out += info.outJump;
10184 else if (info.inFormat == RTAUDIO_FLOAT32) {
10185 Float32 *in = (Float32 *)inBuffer;
10186 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10187 for (j=0; j<info.channels; j++) {
10188 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10191 out += info.outJump;
10194 else if (info.inFormat == RTAUDIO_FLOAT64) {
10195 Float64 *in = (Float64 *)inBuffer;
10196 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10197 for (j=0; j<info.channels; j++) {
10198 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10201 out += info.outJump;
10207 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10208 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10209 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10211 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10217 if ( format == RTAUDIO_SINT16 ) {
10218 for ( unsigned int i=0; i<samples; i++ ) {
10219 // Swap 1st and 2nd bytes.
10224 // Increment 2 bytes.
10228 else if ( format == RTAUDIO_SINT32 ||
10229 format == RTAUDIO_FLOAT32 ) {
10230 for ( unsigned int i=0; i<samples; i++ ) {
10231 // Swap 1st and 4th bytes.
10236 // Swap 2nd and 3rd bytes.
10242 // Increment 3 more bytes.
10246 else if ( format == RTAUDIO_SINT24 ) {
10247 for ( unsigned int i=0; i<samples; i++ ) {
10248 // Swap 1st and 3rd bytes.
10253 // Increment 2 more bytes.
10257 else if ( format == RTAUDIO_FLOAT64 ) {
10258 for ( unsigned int i=0; i<samples; i++ ) {
10259 // Swap 1st and 8th bytes
10264 // Swap 2nd and 7th bytes
10270 // Swap 3rd and 6th bytes
10276 // Swap 4th and 5th bytes
10282 // Increment 5 more bytes.
10288 // Indentation settings for Vim and Emacs
10290 // Local Variables:
10291 // c-basic-offset: 2
10292 // indent-tabs-mode: nil
10295 // vim: et sts=2 sw=2