1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1
\r
43 #include "RtAudio.h"
\r
48 #include <algorithm>
\r
50 // Static variable definitions.
\r
51 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
52 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
53 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
54 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-dependent mutex wrappers and (on Windows) string-conversion
// helpers.  Windows builds use critical sections; pthread-based
// platforms use pthread mutexes; otherwise the macros are no-op
// dummies so the generic code still compiles.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)

// Convert a narrow C string to std::string (identity conversion).
static std::string convertCharPointerToStdString(const char *text)
{
  return std::string(text);
}

// Convert a wide (UTF-16) string to a UTF-8 std::string.
static std::string convertCharPointerToStdString(const wchar_t *text)
{
  // First call computes the required buffer size (includes the NUL).
  int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
  std::string s( length-1, '\0' );  // string manages its own terminator
  WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
  return s;
}

#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
#endif
\r
89 // *************************************************** //
\r
91 // RtAudio definitions.
\r
93 // *************************************************** //
\r
95 std::string RtAudio :: getVersion( void ) throw()
\r
97 return RTAUDIO_VERSION;
\r
100 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
104 // The order here will control the order of RtAudio's API search in
\r
105 // the constructor.
\r
106 #if defined(__UNIX_JACK__)
\r
107 apis.push_back( UNIX_JACK );
\r
109 #if defined(__LINUX_ALSA__)
\r
110 apis.push_back( LINUX_ALSA );
\r
112 #if defined(__LINUX_PULSE__)
\r
113 apis.push_back( LINUX_PULSE );
\r
115 #if defined(__LINUX_OSS__)
\r
116 apis.push_back( LINUX_OSS );
\r
118 #if defined(__WINDOWS_ASIO__)
\r
119 apis.push_back( WINDOWS_ASIO );
\r
121 #if defined(__WINDOWS_WASAPI__)
\r
122 apis.push_back( WINDOWS_WASAPI );
\r
124 #if defined(__WINDOWS_DS__)
\r
125 apis.push_back( WINDOWS_DS );
\r
127 #if defined(__MACOSX_CORE__)
\r
128 apis.push_back( MACOSX_CORE );
\r
130 #if defined(__RTAUDIO_DUMMY__)
\r
131 apis.push_back( RTAUDIO_DUMMY );
\r
135 void RtAudio :: openRtApi( RtAudio::Api api )
\r
141 #if defined(__UNIX_JACK__)
\r
142 if ( api == UNIX_JACK )
\r
143 rtapi_ = new RtApiJack();
\r
145 #if defined(__LINUX_ALSA__)
\r
146 if ( api == LINUX_ALSA )
\r
147 rtapi_ = new RtApiAlsa();
\r
149 #if defined(__LINUX_PULSE__)
\r
150 if ( api == LINUX_PULSE )
\r
151 rtapi_ = new RtApiPulse();
\r
153 #if defined(__LINUX_OSS__)
\r
154 if ( api == LINUX_OSS )
\r
155 rtapi_ = new RtApiOss();
\r
157 #if defined(__WINDOWS_ASIO__)
\r
158 if ( api == WINDOWS_ASIO )
\r
159 rtapi_ = new RtApiAsio();
\r
161 #if defined(__WINDOWS_WASAPI__)
\r
162 if ( api == WINDOWS_WASAPI )
\r
163 rtapi_ = new RtApiWasapi();
\r
165 #if defined(__WINDOWS_DS__)
\r
166 if ( api == WINDOWS_DS )
\r
167 rtapi_ = new RtApiDs();
\r
169 #if defined(__MACOSX_CORE__)
\r
170 if ( api == MACOSX_CORE )
\r
171 rtapi_ = new RtApiCore();
\r
173 #if defined(__RTAUDIO_DUMMY__)
\r
174 if ( api == RTAUDIO_DUMMY )
\r
175 rtapi_ = new RtApiDummy();
\r
179 RtAudio :: RtAudio( RtAudio::Api api )
\r
183 if ( api != UNSPECIFIED ) {
\r
184 // Attempt to open the specified API.
\r
186 if ( rtapi_ ) return;
\r
188 // No compiled support for specified API value. Issue a debug
\r
189 // warning and continue as if no API was specified.
\r
190 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
193 // Iterate through the compiled APIs and return as soon as we find
\r
194 // one with at least one device or we reach the end of the list.
\r
195 std::vector< RtAudio::Api > apis;
\r
196 getCompiledApi( apis );
\r
197 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
198 openRtApi( apis[i] );
\r
199 if ( rtapi_ && rtapi_->getDeviceCount() ) break;
\r
202 if ( rtapi_ ) return;
\r
204 // It should not be possible to get here because the preprocessor
\r
205 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
206 // API-specific definitions are passed to the compiler. But just in
\r
207 // case something weird happens, we'll thow an error.
\r
208 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
209 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
212 RtAudio :: ~RtAudio() throw()
\r
218 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
219 RtAudio::StreamParameters *inputParameters,
\r
220 RtAudioFormat format, unsigned int sampleRate,
\r
221 unsigned int *bufferFrames,
\r
222 RtAudioCallback callback, void *userData,
\r
223 RtAudio::StreamOptions *options,
\r
224 RtAudioErrorCallback errorCallback )
\r
226 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
227 sampleRate, bufferFrames, callback,
\r
228 userData, options, errorCallback );
\r
231 // *************************************************** //
\r
233 // Public RtApi definitions (see end of file for
\r
234 // private or protected utility functions).
\r
236 // *************************************************** //
\r
240 stream_.state = STREAM_CLOSED;
\r
241 stream_.mode = UNINITIALIZED;
\r
242 stream_.apiHandle = 0;
\r
243 stream_.userBuffer[0] = 0;
\r
244 stream_.userBuffer[1] = 0;
\r
245 MUTEX_INITIALIZE( &stream_.mutex );
\r
246 showWarnings_ = true;
\r
247 firstErrorOccurred_ = false;
\r
252 MUTEX_DESTROY( &stream_.mutex );
\r
255 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
256 RtAudio::StreamParameters *iParams,
\r
257 RtAudioFormat format, unsigned int sampleRate,
\r
258 unsigned int *bufferFrames,
\r
259 RtAudioCallback callback, void *userData,
\r
260 RtAudio::StreamOptions *options,
\r
261 RtAudioErrorCallback errorCallback )
\r
263 if ( stream_.state != STREAM_CLOSED ) {
\r
264 errorText_ = "RtApi::openStream: a stream is already open!";
\r
265 error( RtAudioError::INVALID_USE );
\r
269 // Clear stream information potentially left from a previously open stream.
\r
272 if ( oParams && oParams->nChannels < 1 ) {
\r
273 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
274 error( RtAudioError::INVALID_USE );
\r
278 if ( iParams && iParams->nChannels < 1 ) {
\r
279 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
280 error( RtAudioError::INVALID_USE );
\r
284 if ( oParams == NULL && iParams == NULL ) {
\r
285 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
286 error( RtAudioError::INVALID_USE );
\r
290 if ( formatBytes(format) == 0 ) {
\r
291 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
292 error( RtAudioError::INVALID_USE );
\r
296 unsigned int nDevices = getDeviceCount();
\r
297 unsigned int oChannels = 0;
\r
299 oChannels = oParams->nChannels;
\r
300 if ( oParams->deviceId >= nDevices ) {
\r
301 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
302 error( RtAudioError::INVALID_USE );
\r
307 unsigned int iChannels = 0;
\r
309 iChannels = iParams->nChannels;
\r
310 if ( iParams->deviceId >= nDevices ) {
\r
311 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
312 error( RtAudioError::INVALID_USE );
\r
319 if ( oChannels > 0 ) {
\r
321 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
322 sampleRate, format, bufferFrames, options );
\r
323 if ( result == false ) {
\r
324 error( RtAudioError::SYSTEM_ERROR );
\r
329 if ( iChannels > 0 ) {
\r
331 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
332 sampleRate, format, bufferFrames, options );
\r
333 if ( result == false ) {
\r
334 if ( oChannels > 0 ) closeStream();
\r
335 error( RtAudioError::SYSTEM_ERROR );
\r
340 stream_.callbackInfo.callback = (void *) callback;
\r
341 stream_.callbackInfo.userData = userData;
\r
342 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
344 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
345 stream_.state = STREAM_STOPPED;
\r
348 unsigned int RtApi :: getDefaultInputDevice( void )
\r
350 // Should be implemented in subclasses if possible.
\r
354 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
356 // Should be implemented in subclasses if possible.
\r
360 void RtApi :: closeStream( void )
\r
362 // MUST be implemented in subclasses!
\r
366 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
367 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
368 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
369 RtAudio::StreamOptions * /*options*/ )
\r
371 // MUST be implemented in subclasses!
\r
375 void RtApi :: tickStreamTime( void )
\r
377 // Subclasses that do not provide their own implementation of
\r
378 // getStreamTime should call this function once per buffer I/O to
\r
379 // provide basic stream time support.
\r
381 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
383 #if defined( HAVE_GETTIMEOFDAY )
\r
384 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
388 long RtApi :: getStreamLatency( void )
\r
392 long totalLatency = 0;
\r
393 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
394 totalLatency = stream_.latency[0];
\r
395 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
396 totalLatency += stream_.latency[1];
\r
398 return totalLatency;
\r
401 double RtApi :: getStreamTime( void )
\r
405 #if defined( HAVE_GETTIMEOFDAY )
\r
406 // Return a very accurate estimate of the stream time by
\r
407 // adding in the elapsed time since the last tick.
\r
408 struct timeval then;
\r
409 struct timeval now;
\r
411 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
412 return stream_.streamTime;
\r
414 gettimeofday( &now, NULL );
\r
415 then = stream_.lastTickTimestamp;
\r
416 return stream_.streamTime +
\r
417 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
418 (then.tv_sec + 0.000001 * then.tv_usec));
\r
420 return stream_.streamTime;
\r
424 void RtApi :: setStreamTime( double time )
\r
429 stream_.streamTime = time;
\r
432 unsigned int RtApi :: getStreamSampleRate( void )
\r
436 return stream_.sampleRate;
\r
440 // *************************************************** //
\r
442 // OS/API-specific methods.
\r
444 // *************************************************** //
\r
446 #if defined(__MACOSX_CORE__)
\r
448 // The OS X CoreAudio API is designed to use a separate callback
\r
449 // procedure for each of its audio devices. A single RtAudio duplex
\r
450 // stream using two different devices is supported here, though it
\r
451 // cannot be guaranteed to always behave correctly because we cannot
\r
452 // synchronize these two callbacks.
\r
454 // A property listener is installed for over/underrun information.
\r
455 // However, no functionality is currently provided to allow property
\r
456 // listeners to trigger user handlers because it is unclear what could
\r
457 // be done if a critical stream parameter (buffer size, sample rate,
\r
458 // device disconnect) notification arrived. The listeners entail
\r
459 // quite a bit of extra code and most likely, a user program wouldn't
\r
460 // be prepared for the result anyway. However, we do provide a flag
\r
461 // to the client callback function to inform of an over/underrun.
\r
463 // A structure to hold various information related to the CoreAudio API
\r
465 struct CoreHandle {
\r
466 AudioDeviceID id[2]; // device ids
\r
467 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
468 AudioDeviceIOProcID procId[2];
\r
470 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
471 UInt32 nStreams[2]; // number of streams to use
\r
473 char *deviceBuffer;
\r
474 pthread_cond_t condition;
\r
475 int drainCounter; // Tracks callback counts when draining
\r
476 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
479 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
482 RtApiCore:: RtApiCore()
\r
484 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
485 // This is a largely undocumented but absolutely necessary
\r
486 // requirement starting with OS-X 10.6. If not called, queries and
\r
487 // updates to various audio device properties are not handled
\r
489 CFRunLoopRef theRunLoop = NULL;
\r
490 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
491 kAudioObjectPropertyScopeGlobal,
\r
492 kAudioObjectPropertyElementMaster };
\r
493 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
494 if ( result != noErr ) {
\r
495 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
496 error( RtAudioError::WARNING );
\r
501 RtApiCore :: ~RtApiCore()
\r
503 // The subclass destructor gets called before the base class
\r
504 // destructor, so close an existing stream before deallocating
\r
505 // apiDeviceId memory.
\r
506 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
509 unsigned int RtApiCore :: getDeviceCount( void )
\r
511 // Find out how many audio devices there are, if any.
\r
513 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
514 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
515 if ( result != noErr ) {
\r
516 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
517 error( RtAudioError::WARNING );
\r
521 return dataSize / sizeof( AudioDeviceID );
\r
524 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
526 unsigned int nDevices = getDeviceCount();
\r
527 if ( nDevices <= 1 ) return 0;
\r
530 UInt32 dataSize = sizeof( AudioDeviceID );
\r
531 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
532 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
533 if ( result != noErr ) {
\r
534 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
535 error( RtAudioError::WARNING );
\r
539 dataSize *= nDevices;
\r
540 AudioDeviceID deviceList[ nDevices ];
\r
541 property.mSelector = kAudioHardwarePropertyDevices;
\r
542 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
545 error( RtAudioError::WARNING );
\r
549 for ( unsigned int i=0; i<nDevices; i++ )
\r
550 if ( id == deviceList[i] ) return i;
\r
552 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
553 error( RtAudioError::WARNING );
\r
557 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
559 unsigned int nDevices = getDeviceCount();
\r
560 if ( nDevices <= 1 ) return 0;
\r
563 UInt32 dataSize = sizeof( AudioDeviceID );
\r
564 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
565 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
566 if ( result != noErr ) {
\r
567 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
568 error( RtAudioError::WARNING );
\r
572 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
573 AudioDeviceID deviceList[ nDevices ];
\r
574 property.mSelector = kAudioHardwarePropertyDevices;
\r
575 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
576 if ( result != noErr ) {
\r
577 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
578 error( RtAudioError::WARNING );
\r
582 for ( unsigned int i=0; i<nDevices; i++ )
\r
583 if ( id == deviceList[i] ) return i;
\r
585 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
586 error( RtAudioError::WARNING );
\r
590 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
592 RtAudio::DeviceInfo info;
\r
593 info.probed = false;
\r
596 unsigned int nDevices = getDeviceCount();
\r
597 if ( nDevices == 0 ) {
\r
598 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
599 error( RtAudioError::INVALID_USE );
\r
603 if ( device >= nDevices ) {
\r
604 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
605 error( RtAudioError::INVALID_USE );
\r
609 AudioDeviceID deviceList[ nDevices ];
\r
610 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
611 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
612 kAudioObjectPropertyScopeGlobal,
\r
613 kAudioObjectPropertyElementMaster };
\r
614 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
615 0, NULL, &dataSize, (void *) &deviceList );
\r
616 if ( result != noErr ) {
\r
617 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
618 error( RtAudioError::WARNING );
\r
622 AudioDeviceID id = deviceList[ device ];
\r
624 // Get the device name.
\r
626 CFStringRef cfname;
\r
627 dataSize = sizeof( CFStringRef );
\r
628 property.mSelector = kAudioObjectPropertyManufacturer;
\r
629 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
630 if ( result != noErr ) {
\r
631 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtAudioError::WARNING );
\r
637 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
638 int length = CFStringGetLength(cfname);
\r
639 char *mname = (char *)malloc(length * 3 + 1);
\r
640 #if defined( UNICODE ) || defined( _UNICODE )
\r
641 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
643 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
645 info.name.append( (const char *)mname, strlen(mname) );
\r
646 info.name.append( ": " );
\r
647 CFRelease( cfname );
\r
650 property.mSelector = kAudioObjectPropertyName;
\r
651 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
652 if ( result != noErr ) {
\r
653 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
654 errorText_ = errorStream_.str();
\r
655 error( RtAudioError::WARNING );
\r
659 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
660 length = CFStringGetLength(cfname);
\r
661 char *name = (char *)malloc(length * 3 + 1);
\r
662 #if defined( UNICODE ) || defined( _UNICODE )
\r
663 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
665 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
667 info.name.append( (const char *)name, strlen(name) );
\r
668 CFRelease( cfname );
\r
671 // Get the output stream "configuration".
\r
672 AudioBufferList *bufferList = nil;
\r
673 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
674 property.mScope = kAudioDevicePropertyScopeOutput;
\r
675 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
677 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
678 if ( result != noErr || dataSize == 0 ) {
\r
679 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
680 errorText_ = errorStream_.str();
\r
681 error( RtAudioError::WARNING );
\r
685 // Allocate the AudioBufferList.
\r
686 bufferList = (AudioBufferList *) malloc( dataSize );
\r
687 if ( bufferList == NULL ) {
\r
688 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
689 error( RtAudioError::WARNING );
\r
693 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
694 if ( result != noErr || dataSize == 0 ) {
\r
695 free( bufferList );
\r
696 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
697 errorText_ = errorStream_.str();
\r
698 error( RtAudioError::WARNING );
\r
702 // Get output channel information.
\r
703 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
704 for ( i=0; i<nStreams; i++ )
\r
705 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
706 free( bufferList );
\r
708 // Get the input stream "configuration".
\r
709 property.mScope = kAudioDevicePropertyScopeInput;
\r
710 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
711 if ( result != noErr || dataSize == 0 ) {
\r
712 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
713 errorText_ = errorStream_.str();
\r
714 error( RtAudioError::WARNING );
\r
718 // Allocate the AudioBufferList.
\r
719 bufferList = (AudioBufferList *) malloc( dataSize );
\r
720 if ( bufferList == NULL ) {
\r
721 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
722 error( RtAudioError::WARNING );
\r
726 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
727 if (result != noErr || dataSize == 0) {
\r
728 free( bufferList );
\r
729 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
730 errorText_ = errorStream_.str();
\r
731 error( RtAudioError::WARNING );
\r
735 // Get input channel information.
\r
736 nStreams = bufferList->mNumberBuffers;
\r
737 for ( i=0; i<nStreams; i++ )
\r
738 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
739 free( bufferList );
\r
741 // If device opens for both playback and capture, we determine the channels.
\r
742 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
743 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
745 // Probe the device sample rates.
\r
746 bool isInput = false;
\r
747 if ( info.outputChannels == 0 ) isInput = true;
\r
749 // Determine the supported sample rates.
\r
750 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
751 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
752 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
753 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
754 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
755 errorText_ = errorStream_.str();
\r
756 error( RtAudioError::WARNING );
\r
760 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
761 AudioValueRange rangeList[ nRanges ];
\r
762 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
763 if ( result != kAudioHardwareNoError ) {
\r
764 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
765 errorText_ = errorStream_.str();
\r
766 error( RtAudioError::WARNING );
\r
770 // The sample rate reporting mechanism is a bit of a mystery. It
\r
771 // seems that it can either return individual rates or a range of
\r
772 // rates. I assume that if the min / max range values are the same,
\r
773 // then that represents a single supported rate and if the min / max
\r
774 // range values are different, the device supports an arbitrary
\r
775 // range of values (though there might be multiple ranges, so we'll
\r
776 // use the most conservative range).
\r
777 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
778 bool haveValueRange = false;
\r
779 info.sampleRates.clear();
\r
780 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
781 if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
\r
782 unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
\r
783 info.sampleRates.push_back( tmpSr );
\r
785 if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
\r
786 info.preferredSampleRate = tmpSr;
\r
789 haveValueRange = true;
\r
790 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
791 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
795 if ( haveValueRange ) {
\r
796 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
797 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
\r
798 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
800 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
801 info.preferredSampleRate = SAMPLE_RATES[k];
\r
806 // Sort and remove any redundant values
\r
807 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
808 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
810 if ( info.sampleRates.size() == 0 ) {
\r
811 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
812 errorText_ = errorStream_.str();
\r
813 error( RtAudioError::WARNING );
\r
817 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
818 // Thus, any other "physical" formats supported by the device are of
\r
819 // no interest to the client.
\r
820 info.nativeFormats = RTAUDIO_FLOAT32;
\r
822 if ( info.outputChannels > 0 )
\r
823 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
824 if ( info.inputChannels > 0 )
\r
825 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
827 info.probed = true;
\r
831 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
832 const AudioTimeStamp* /*inNow*/,
\r
833 const AudioBufferList* inInputData,
\r
834 const AudioTimeStamp* /*inInputTime*/,
\r
835 AudioBufferList* outOutputData,
\r
836 const AudioTimeStamp* /*inOutputTime*/,
\r
837 void* infoPointer )
\r
839 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
841 RtApiCore *object = (RtApiCore *) info->object;
\r
842 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
843 return kAudioHardwareUnspecifiedError;
\r
845 return kAudioHardwareNoError;
\r
848 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
850 const AudioObjectPropertyAddress properties[],
\r
851 void* handlePointer )
\r
853 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
854 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
855 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
856 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
857 handle->xrun[1] = true;
\r
859 handle->xrun[0] = true;
\r
863 return kAudioHardwareNoError;
\r
866 static OSStatus rateListener( AudioObjectID inDevice,
\r
867 UInt32 /*nAddresses*/,
\r
868 const AudioObjectPropertyAddress /*properties*/[],
\r
869 void* ratePointer )
\r
871 Float64 *rate = (Float64 *) ratePointer;
\r
872 UInt32 dataSize = sizeof( Float64 );
\r
873 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
874 kAudioObjectPropertyScopeGlobal,
\r
875 kAudioObjectPropertyElementMaster };
\r
876 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
877 return kAudioHardwareNoError;
\r
880 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
881 unsigned int firstChannel, unsigned int sampleRate,
\r
882 RtAudioFormat format, unsigned int *bufferSize,
\r
883 RtAudio::StreamOptions *options )
\r
886 unsigned int nDevices = getDeviceCount();
\r
887 if ( nDevices == 0 ) {
\r
888 // This should not happen because a check is made before this function is called.
\r
889 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
893 if ( device >= nDevices ) {
\r
894 // This should not happen because a check is made before this function is called.
\r
895 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
899 AudioDeviceID deviceList[ nDevices ];
\r
900 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
901 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
902 kAudioObjectPropertyScopeGlobal,
\r
903 kAudioObjectPropertyElementMaster };
\r
904 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
905 0, NULL, &dataSize, (void *) &deviceList );
\r
906 if ( result != noErr ) {
\r
907 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
911 AudioDeviceID id = deviceList[ device ];
\r
913 // Setup for stream mode.
\r
914 bool isInput = false;
\r
915 if ( mode == INPUT ) {
\r
917 property.mScope = kAudioDevicePropertyScopeInput;
\r
920 property.mScope = kAudioDevicePropertyScopeOutput;
\r
922 // Get the stream "configuration".
\r
923 AudioBufferList *bufferList = nil;
\r
925 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
926 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
927 if ( result != noErr || dataSize == 0 ) {
\r
928 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
929 errorText_ = errorStream_.str();
\r
933 // Allocate the AudioBufferList.
\r
934 bufferList = (AudioBufferList *) malloc( dataSize );
\r
935 if ( bufferList == NULL ) {
\r
936 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
940 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
941 if (result != noErr || dataSize == 0) {
\r
942 free( bufferList );
\r
943 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
944 errorText_ = errorStream_.str();
\r
948 // Search for one or more streams that contain the desired number of
\r
949 // channels. CoreAudio devices can have an arbitrary number of
\r
950 // streams and each stream can have an arbitrary number of channels.
\r
951 // For each stream, a single buffer of interleaved samples is
\r
952 // provided. RtAudio prefers the use of one stream of interleaved
\r
953 // data or multiple consecutive single-channel streams. However, we
\r
954 // now support multiple consecutive multi-channel streams of
\r
955 // interleaved data as well.
\r
956 UInt32 iStream, offsetCounter = firstChannel;
\r
957 UInt32 nStreams = bufferList->mNumberBuffers;
\r
958 bool monoMode = false;
\r
959 bool foundStream = false;
\r
961 // First check that the device supports the requested number of
\r
963 UInt32 deviceChannels = 0;
\r
964 for ( iStream=0; iStream<nStreams; iStream++ )
\r
965 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
967 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
968 free( bufferList );
\r
969 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
970 errorText_ = errorStream_.str();
\r
974 // Look for a single stream meeting our needs.
\r
975 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
976 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
977 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
978 if ( streamChannels >= channels + offsetCounter ) {
\r
979 firstStream = iStream;
\r
980 channelOffset = offsetCounter;
\r
981 foundStream = true;
\r
984 if ( streamChannels > offsetCounter ) break;
\r
985 offsetCounter -= streamChannels;
\r
988 // If we didn't find a single stream above, then we should be able
\r
989 // to meet the channel specification with multiple streams.
\r
990 if ( foundStream == false ) {
\r
992 offsetCounter = firstChannel;
\r
993 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
994 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
995 if ( streamChannels > offsetCounter ) break;
\r
996 offsetCounter -= streamChannels;
\r
999 firstStream = iStream;
\r
1000 channelOffset = offsetCounter;
\r
1001 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
1003 if ( streamChannels > 1 ) monoMode = false;
\r
1004 while ( channelCounter > 0 ) {
\r
1005 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
1006 if ( streamChannels > 1 ) monoMode = false;
\r
1007 channelCounter -= streamChannels;
\r
1012 free( bufferList );
\r
1014 // Determine the buffer size.
\r
1015 AudioValueRange bufferRange;
\r
1016 dataSize = sizeof( AudioValueRange );
\r
1017 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1018 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1020 if ( result != noErr ) {
\r
1021 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1022 errorText_ = errorStream_.str();
\r
1026 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1027 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1028 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1030 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1031 // need to make this setting for the master channel.
\r
1032 UInt32 theSize = (UInt32) *bufferSize;
\r
1033 dataSize = sizeof( UInt32 );
\r
1034 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1035 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1037 if ( result != noErr ) {
\r
1038 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1039 errorText_ = errorStream_.str();
\r
1043 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1044 // MUST be the same in both directions!
\r
1045 *bufferSize = theSize;
\r
1046 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1047 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1048 errorText_ = errorStream_.str();
\r
1052 stream_.bufferSize = *bufferSize;
\r
1053 stream_.nBuffers = 1;
\r
1055 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1056 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1058 dataSize = sizeof( hog_pid );
\r
1059 property.mSelector = kAudioDevicePropertyHogMode;
\r
1060 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1061 if ( result != noErr ) {
\r
1062 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1063 errorText_ = errorStream_.str();
\r
1067 if ( hog_pid != getpid() ) {
\r
1068 hog_pid = getpid();
\r
1069 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1070 if ( result != noErr ) {
\r
1071 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1072 errorText_ = errorStream_.str();
\r
1078 // Check and if necessary, change the sample rate for the device.
\r
1079 Float64 nominalRate;
\r
1080 dataSize = sizeof( Float64 );
\r
1081 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1082 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1083 if ( result != noErr ) {
\r
1084 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1085 errorText_ = errorStream_.str();
\r
1089 // Only change the sample rate if off by more than 1 Hz.
\r
1090 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1092 // Set a property listener for the sample rate change
\r
1093 Float64 reportedRate = 0.0;
\r
1094 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1095 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1096 if ( result != noErr ) {
\r
1097 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1098 errorText_ = errorStream_.str();
\r
1102 nominalRate = (Float64) sampleRate;
\r
1103 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1104 if ( result != noErr ) {
\r
1105 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1106 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1107 errorText_ = errorStream_.str();
\r
1111 // Now wait until the reported nominal rate is what we just set.
\r
1112 UInt32 microCounter = 0;
\r
1113 while ( reportedRate != nominalRate ) {
\r
1114 microCounter += 5000;
\r
1115 if ( microCounter > 5000000 ) break;
\r
1119 // Remove the property listener.
\r
1120 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1122 if ( microCounter > 5000000 ) {
\r
1123 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1124 errorText_ = errorStream_.str();
\r
1129 // Now set the stream format for all streams. Also, check the
\r
1130 // physical format of the device and change that if necessary.
\r
1131 AudioStreamBasicDescription description;
\r
1132 dataSize = sizeof( AudioStreamBasicDescription );
\r
1133 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1134 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1135 if ( result != noErr ) {
\r
1136 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1137 errorText_ = errorStream_.str();
\r
1141 // Set the sample rate and data format id. However, only make the
\r
1142 // change if the sample rate is not within 1.0 of the desired
\r
1143 // rate and the format is not linear pcm.
\r
1144 bool updateFormat = false;
\r
1145 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1146 description.mSampleRate = (Float64) sampleRate;
\r
1147 updateFormat = true;
\r
1150 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1151 description.mFormatID = kAudioFormatLinearPCM;
\r
1152 updateFormat = true;
\r
1155 if ( updateFormat ) {
\r
1156 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1157 if ( result != noErr ) {
\r
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1159 errorText_ = errorStream_.str();
\r
1164 // Now check the physical format.
\r
1165 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1166 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1167 if ( result != noErr ) {
\r
1168 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1169 errorText_ = errorStream_.str();
\r
1173 //std::cout << "Current physical stream format:" << std::endl;
\r
1174 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1175 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1176 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1177 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1179 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1180 description.mFormatID = kAudioFormatLinearPCM;
\r
1181 //description.mSampleRate = (Float64) sampleRate;
\r
1182 AudioStreamBasicDescription testDescription = description;
\r
1183 UInt32 formatFlags;
\r
1185 // We'll try higher bit rates first and then work our way down.
\r
1186 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1187 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1189 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1190 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1191 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1192 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1193 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1194 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1196 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1198 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1200 bool setPhysicalFormat = false;
\r
1201 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1202 testDescription = description;
\r
1203 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1204 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1205 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1206 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1208 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1209 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1210 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1211 if ( result == noErr ) {
\r
1212 setPhysicalFormat = true;
\r
1213 //std::cout << "Updated physical stream format:" << std::endl;
\r
1214 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1215 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1216 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1217 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1222 if ( !setPhysicalFormat ) {
\r
1223 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1224 errorText_ = errorStream_.str();
\r
1227 } // done setting virtual/physical formats.
\r
1229 // Get the stream / device latency.
\r
1231 dataSize = sizeof( UInt32 );
\r
1232 property.mSelector = kAudioDevicePropertyLatency;
\r
1233 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1234 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1235 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1237 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1238 errorText_ = errorStream_.str();
\r
1239 error( RtAudioError::WARNING );
\r
1243 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1244 // always be presented in native-endian format, so we should never
\r
1245 // need to byte swap.
\r
1246 stream_.doByteSwap[mode] = false;
\r
1248 // From the CoreAudio documentation, PCM data must be supplied as
\r
1250 stream_.userFormat = format;
\r
1251 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1253 if ( streamCount == 1 )
\r
1254 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1255 else // multiple streams
\r
1256 stream_.nDeviceChannels[mode] = channels;
\r
1257 stream_.nUserChannels[mode] = channels;
\r
1258 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1259 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1260 else stream_.userInterleaved = true;
\r
1261 stream_.deviceInterleaved[mode] = true;
\r
1262 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1264 // Set flags for buffer conversion.
\r
1265 stream_.doConvertBuffer[mode] = false;
\r
1266 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1267 stream_.doConvertBuffer[mode] = true;
\r
1268 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1269 stream_.doConvertBuffer[mode] = true;
\r
1270 if ( streamCount == 1 ) {
\r
1271 if ( stream_.nUserChannels[mode] > 1 &&
\r
1272 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1273 stream_.doConvertBuffer[mode] = true;
\r
1275 else if ( monoMode && stream_.userInterleaved )
\r
1276 stream_.doConvertBuffer[mode] = true;
\r
1278 // Allocate our CoreHandle structure for the stream.
\r
1279 CoreHandle *handle = 0;
\r
1280 if ( stream_.apiHandle == 0 ) {
\r
1282 handle = new CoreHandle;
\r
1284 catch ( std::bad_alloc& ) {
\r
1285 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1289 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1290 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1293 stream_.apiHandle = (void *) handle;
\r
1296 handle = (CoreHandle *) stream_.apiHandle;
\r
1297 handle->iStream[mode] = firstStream;
\r
1298 handle->nStreams[mode] = streamCount;
\r
1299 handle->id[mode] = id;
\r
1301 // Allocate necessary internal buffers.
\r
1302 unsigned long bufferBytes;
\r
1303 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1304 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1305 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1306 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1307 if ( stream_.userBuffer[mode] == NULL ) {
\r
1308 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1312 // If possible, we will make use of the CoreAudio stream buffers as
\r
1313 // "device buffers". However, we can't do this if using multiple
\r
1315 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1317 bool makeBuffer = true;
\r
1318 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1319 if ( mode == INPUT ) {
\r
1320 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1321 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1322 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1326 if ( makeBuffer ) {
\r
1327 bufferBytes *= *bufferSize;
\r
1328 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1329 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1330 if ( stream_.deviceBuffer == NULL ) {
\r
1331 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1337 stream_.sampleRate = sampleRate;
\r
1338 stream_.device[mode] = device;
\r
1339 stream_.state = STREAM_STOPPED;
\r
1340 stream_.callbackInfo.object = (void *) this;
\r
1342 // Setup the buffer conversion information structure.
\r
1343 if ( stream_.doConvertBuffer[mode] ) {
\r
1344 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1345 else setConvertInfo( mode, channelOffset );
\r
1348 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1349 // Only one callback procedure per device.
\r
1350 stream_.mode = DUPLEX;
\r
1352 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1353 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1355 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1356 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1358 if ( result != noErr ) {
\r
1359 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1360 errorText_ = errorStream_.str();
\r
1363 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1364 stream_.mode = DUPLEX;
\r
1366 stream_.mode = mode;
\r
1369 // Setup the device property listener for over/underload.
\r
1370 property.mSelector = kAudioDeviceProcessorOverload;
\r
1371 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1372 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1378 pthread_cond_destroy( &handle->condition );
\r
1380 stream_.apiHandle = 0;
\r
1383 for ( int i=0; i<2; i++ ) {
\r
1384 if ( stream_.userBuffer[i] ) {
\r
1385 free( stream_.userBuffer[i] );
\r
1386 stream_.userBuffer[i] = 0;
\r
1390 if ( stream_.deviceBuffer ) {
\r
1391 free( stream_.deviceBuffer );
\r
1392 stream_.deviceBuffer = 0;
\r
1395 stream_.state = STREAM_CLOSED;
\r
1399 void RtApiCore :: closeStream( void )
\r
1401 if ( stream_.state == STREAM_CLOSED ) {
\r
1402 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1403 error( RtAudioError::WARNING );
\r
1407 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1408 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1409 if ( stream_.state == STREAM_RUNNING )
\r
1410 AudioDeviceStop( handle->id[0], callbackHandler );
\r
1411 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1412 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1414 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1415 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
\r
1419 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1420 if ( stream_.state == STREAM_RUNNING )
\r
1421 AudioDeviceStop( handle->id[1], callbackHandler );
\r
1422 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1423 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1425 // deprecated in favor of AudioDeviceDestroyIOProcID()
\r
1426 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
\r
1430 for ( int i=0; i<2; i++ ) {
\r
1431 if ( stream_.userBuffer[i] ) {
\r
1432 free( stream_.userBuffer[i] );
\r
1433 stream_.userBuffer[i] = 0;
\r
1437 if ( stream_.deviceBuffer ) {
\r
1438 free( stream_.deviceBuffer );
\r
1439 stream_.deviceBuffer = 0;
\r
1442 // Destroy pthread condition variable.
\r
1443 pthread_cond_destroy( &handle->condition );
\r
1445 stream_.apiHandle = 0;
\r
1447 stream_.mode = UNINITIALIZED;
\r
1448 stream_.state = STREAM_CLOSED;
\r
1451 void RtApiCore :: startStream( void )
\r
1454 if ( stream_.state == STREAM_RUNNING ) {
\r
1455 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1456 error( RtAudioError::WARNING );
\r
1460 OSStatus result = noErr;
\r
1461 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1462 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1464 result = AudioDeviceStart( handle->id[0], callbackHandler );
\r
1465 if ( result != noErr ) {
\r
1466 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1467 errorText_ = errorStream_.str();
\r
1472 if ( stream_.mode == INPUT ||
\r
1473 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1475 result = AudioDeviceStart( handle->id[1], callbackHandler );
\r
1476 if ( result != noErr ) {
\r
1477 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1478 errorText_ = errorStream_.str();
\r
1483 handle->drainCounter = 0;
\r
1484 handle->internalDrain = false;
\r
1485 stream_.state = STREAM_RUNNING;
\r
1488 if ( result == noErr ) return;
\r
1489 error( RtAudioError::SYSTEM_ERROR );
\r
1492 void RtApiCore :: stopStream( void )
\r
1495 if ( stream_.state == STREAM_STOPPED ) {
\r
1496 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1497 error( RtAudioError::WARNING );
\r
1501 OSStatus result = noErr;
\r
1502 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1503 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1505 if ( handle->drainCounter == 0 ) {
\r
1506 handle->drainCounter = 2;
\r
1507 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1510 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1511 if ( result != noErr ) {
\r
1512 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1513 errorText_ = errorStream_.str();
\r
1518 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1520 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1521 if ( result != noErr ) {
\r
1522 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1523 errorText_ = errorStream_.str();
\r
1528 stream_.state = STREAM_STOPPED;
\r
1531 if ( result == noErr ) return;
\r
1532 error( RtAudioError::SYSTEM_ERROR );
\r
1535 void RtApiCore :: abortStream( void )
\r
1538 if ( stream_.state == STREAM_STOPPED ) {
\r
1539 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1540 error( RtAudioError::WARNING );
\r
1544 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1545 handle->drainCounter = 2;
\r
1550 // This function will be called by a spawned thread when the user
\r
1551 // callback function signals that the stream should be stopped or
\r
1552 // aborted. It is better to handle it this way because the
\r
1553 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1554 // function is called.
\r
1555 static void *coreStopStream( void *ptr )
\r
1557 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1558 RtApiCore *object = (RtApiCore *) info->object;
\r
1560 object->stopStream();
\r
1561 pthread_exit( NULL );
\r
1564 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1565 const AudioBufferList *inBufferList,
\r
1566 const AudioBufferList *outBufferList )
\r
1568 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1569 if ( stream_.state == STREAM_CLOSED ) {
\r
1570 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1571 error( RtAudioError::WARNING );
\r
1575 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1576 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1578 // Check if we were draining the stream and signal is finished.
\r
1579 if ( handle->drainCounter > 3 ) {
\r
1580 ThreadHandle threadId;
\r
1582 stream_.state = STREAM_STOPPING;
\r
1583 if ( handle->internalDrain == true )
\r
1584 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1585 else // external call to stopStream()
\r
1586 pthread_cond_signal( &handle->condition );
\r
1590 AudioDeviceID outputDevice = handle->id[0];
\r
1592 // Invoke user callback to get fresh output data UNLESS we are
\r
1593 // draining stream or duplex mode AND the input/output devices are
\r
1594 // different AND this function is called for the input device.
\r
1595 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1596 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1597 double streamTime = getStreamTime();
\r
1598 RtAudioStreamStatus status = 0;
\r
1599 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1600 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1601 handle->xrun[0] = false;
\r
1603 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1604 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1605 handle->xrun[1] = false;
\r
1608 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1609 stream_.bufferSize, streamTime, status, info->userData );
\r
1610 if ( cbReturnValue == 2 ) {
\r
1611 stream_.state = STREAM_STOPPING;
\r
1612 handle->drainCounter = 2;
\r
1616 else if ( cbReturnValue == 1 ) {
\r
1617 handle->drainCounter = 1;
\r
1618 handle->internalDrain = true;
\r
1622 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1624 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1626 if ( handle->nStreams[0] == 1 ) {
\r
1627 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1629 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1631 else { // fill multiple streams with zeros
\r
1632 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1633 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1635 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1639 else if ( handle->nStreams[0] == 1 ) {
\r
1640 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1641 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1642 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1644 else { // copy from user buffer
\r
1645 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1646 stream_.userBuffer[0],
\r
1647 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1650 else { // fill multiple streams
\r
1651 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1652 if ( stream_.doConvertBuffer[0] ) {
\r
1653 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1654 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1657 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1658 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1659 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1660 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1661 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1664 else { // fill multiple multi-channel streams with interleaved data
\r
1665 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1666 Float32 *out, *in;
\r
1668 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1669 UInt32 inChannels = stream_.nUserChannels[0];
\r
1670 if ( stream_.doConvertBuffer[0] ) {
\r
1671 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1672 inChannels = stream_.nDeviceChannels[0];
\r
1675 if ( inInterleaved ) inOffset = 1;
\r
1676 else inOffset = stream_.bufferSize;
\r
1678 channelsLeft = inChannels;
\r
1679 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1681 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1682 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1685 // Account for possible channel offset in first stream
\r
1686 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1687 streamChannels -= stream_.channelOffset[0];
\r
1688 outJump = stream_.channelOffset[0];
\r
1692 // Account for possible unfilled channels at end of the last stream
\r
1693 if ( streamChannels > channelsLeft ) {
\r
1694 outJump = streamChannels - channelsLeft;
\r
1695 streamChannels = channelsLeft;
\r
1698 // Determine input buffer offsets and skips
\r
1699 if ( inInterleaved ) {
\r
1700 inJump = inChannels;
\r
1701 in += inChannels - channelsLeft;
\r
1705 in += (inChannels - channelsLeft) * inOffset;
\r
1708 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1709 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1710 *out++ = in[j*inOffset];
\r
1715 channelsLeft -= streamChannels;
\r
1721 // Don't bother draining input
\r
1722 if ( handle->drainCounter ) {
\r
1723 handle->drainCounter++;
\r
1727 AudioDeviceID inputDevice;
\r
1728 inputDevice = handle->id[1];
\r
1729 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1731 if ( handle->nStreams[1] == 1 ) {
\r
1732 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1733 convertBuffer( stream_.userBuffer[1],
\r
1734 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1735 stream_.convertInfo[1] );
\r
1737 else { // copy to user buffer
\r
1738 memcpy( stream_.userBuffer[1],
\r
1739 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1740 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1743 else { // read from multiple streams
\r
1744 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1745 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1747 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1748 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1749 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1750 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1751 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1754 else { // read from multiple multi-channel streams
\r
1755 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1756 Float32 *out, *in;
\r
1758 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1759 UInt32 outChannels = stream_.nUserChannels[1];
\r
1760 if ( stream_.doConvertBuffer[1] ) {
\r
1761 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1762 outChannels = stream_.nDeviceChannels[1];
\r
1765 if ( outInterleaved ) outOffset = 1;
\r
1766 else outOffset = stream_.bufferSize;
\r
1768 channelsLeft = outChannels;
\r
1769 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1771 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1772 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1775 // Account for possible channel offset in first stream
\r
1776 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1777 streamChannels -= stream_.channelOffset[1];
\r
1778 inJump = stream_.channelOffset[1];
\r
1782 // Account for possible unread channels at end of the last stream
\r
1783 if ( streamChannels > channelsLeft ) {
\r
1784 inJump = streamChannels - channelsLeft;
\r
1785 streamChannels = channelsLeft;
\r
1788 // Determine output buffer offsets and skips
\r
1789 if ( outInterleaved ) {
\r
1790 outJump = outChannels;
\r
1791 out += outChannels - channelsLeft;
\r
1795 out += (outChannels - channelsLeft) * outOffset;
\r
1798 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1799 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1800 out[j*outOffset] = *in++;
\r
1805 channelsLeft -= streamChannels;
\r
1809 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1810 convertBuffer( stream_.userBuffer[1],
\r
1811 stream_.deviceBuffer,
\r
1812 stream_.convertInfo[1] );
\r
1818 //MUTEX_UNLOCK( &stream_.mutex );
\r
1820 RtApi::tickStreamTime();
\r
1824 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1828 case kAudioHardwareNotRunningError:
\r
1829 return "kAudioHardwareNotRunningError";
\r
1831 case kAudioHardwareUnspecifiedError:
\r
1832 return "kAudioHardwareUnspecifiedError";
\r
1834 case kAudioHardwareUnknownPropertyError:
\r
1835 return "kAudioHardwareUnknownPropertyError";
\r
1837 case kAudioHardwareBadPropertySizeError:
\r
1838 return "kAudioHardwareBadPropertySizeError";
\r
1840 case kAudioHardwareIllegalOperationError:
\r
1841 return "kAudioHardwareIllegalOperationError";
\r
1843 case kAudioHardwareBadObjectError:
\r
1844 return "kAudioHardwareBadObjectError";
\r
1846 case kAudioHardwareBadDeviceError:
\r
1847 return "kAudioHardwareBadDeviceError";
\r
1849 case kAudioHardwareBadStreamError:
\r
1850 return "kAudioHardwareBadStreamError";
\r
1852 case kAudioHardwareUnsupportedOperationError:
\r
1853 return "kAudioHardwareUnsupportedOperationError";
\r
1855 case kAudioDeviceUnsupportedFormatError:
\r
1856 return "kAudioDeviceUnsupportedFormatError";
\r
1858 case kAudioDevicePermissionsError:
\r
1859 return "kAudioDevicePermissionsError";
\r
1862 return "CoreAudio unknown error";
\r
1866 //******************** End of __MACOSX_CORE__ *********************//
\r
1869 #if defined(__UNIX_JACK__)
\r
1871 // JACK is a low-latency audio server, originally written for the
\r
1872 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1873 // connect a number of different applications to an audio device, as
\r
1874 // well as allowing them to share audio between themselves.
\r
1876 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1877 // have ports connected to the server. The JACK server is typically
\r
1878 // started in a terminal as follows:
\r
1880 // .jackd -d alsa -d hw:0
\r
1882 // or through an interface program such as qjackctl. Many of the
\r
1883 // parameters normally set for a stream are fixed by the JACK server
\r
1884 // and can be specified when the JACK server is started. In
\r
1887 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1889 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1890 // frames, and number of buffers = 4. Once the server is running, it
\r
1891 // is not possible to override these values. If the values are not
\r
1892 // specified in the command-line, the JACK server uses default values.
\r
1894 // The JACK server does not have to be running when an instance of
\r
1895 // RtApiJack is created, though the function getDeviceCount() will
\r
1896 // report 0 devices found until JACK has been started. When no
\r
1897 // devices are available (i.e., the JACK server is not running), a
\r
1898 // stream cannot be opened.
\r
1900 #include <jack/jack.h>
\r
1901 #include <unistd.h>
\r
1904 // A structure to hold various information related to the Jack API
\r
1905 // implementation.
\r
1906 struct JackHandle {
\r
1907 jack_client_t *client;
\r
1908 jack_port_t **ports[2];
\r
1909 std::string deviceName[2];
\r
1911 pthread_cond_t condition;
\r
1912 int drainCounter; // Tracks callback counts when draining
\r
1913 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1916 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op JACK error handler; installed in the RtApiJack constructor to
// suppress the library's default console error reporting.
// (Stray semicolon after the function body removed.)
static void jackSilentError( const char * ) {}
\r
1921 RtApiJack :: RtApiJack()
\r
1923 // Nothing to do here.
\r
1924 #if !defined(__RTAUDIO_DEBUG__)
\r
1925 // Turn off Jack's internal error reporting.
\r
1926 jack_set_error_function( &jackSilentError );
\r
1930 RtApiJack :: ~RtApiJack()
\r
1932 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1935 unsigned int RtApiJack :: getDeviceCount( void )
\r
1937 // See if we can become a jack client.
\r
1938 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1939 jack_status_t *status = NULL;
\r
1940 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1941 if ( client == 0 ) return 0;
\r
1943 const char **ports;
\r
1944 std::string port, previousPort;
\r
1945 unsigned int nChannels = 0, nDevices = 0;
\r
1946 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1948 // Parse the port names up to the first colon (:).
\r
1949 size_t iColon = 0;
\r
1951 port = (char *) ports[ nChannels ];
\r
1952 iColon = port.find(":");
\r
1953 if ( iColon != std::string::npos ) {
\r
1954 port = port.substr( 0, iColon + 1 );
\r
1955 if ( port != previousPort ) {
\r
1957 previousPort = port;
\r
1960 } while ( ports[++nChannels] );
\r
1964 jack_client_close( client );
\r
1968 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1970 RtAudio::DeviceInfo info;
\r
1971 info.probed = false;
\r
1973 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1974 jack_status_t *status = NULL;
\r
1975 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1976 if ( client == 0 ) {
\r
1977 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1978 error( RtAudioError::WARNING );
\r
1982 const char **ports;
\r
1983 std::string port, previousPort;
\r
1984 unsigned int nPorts = 0, nDevices = 0;
\r
1985 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1987 // Parse the port names up to the first colon (:).
\r
1988 size_t iColon = 0;
\r
1990 port = (char *) ports[ nPorts ];
\r
1991 iColon = port.find(":");
\r
1992 if ( iColon != std::string::npos ) {
\r
1993 port = port.substr( 0, iColon );
\r
1994 if ( port != previousPort ) {
\r
1995 if ( nDevices == device ) info.name = port;
\r
1997 previousPort = port;
\r
2000 } while ( ports[++nPorts] );
\r
2004 if ( device >= nDevices ) {
\r
2005 jack_client_close( client );
\r
2006 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
2007 error( RtAudioError::INVALID_USE );
\r
2011 // Get the current jack server sample rate.
\r
2012 info.sampleRates.clear();
\r
2014 info.preferredSampleRate = jack_get_sample_rate( client );
\r
2015 info.sampleRates.push_back( info.preferredSampleRate );
\r
2017 // Count the available ports containing the client name as device
\r
2018 // channels. Jack "input ports" equal RtAudio output channels.
\r
2019 unsigned int nChannels = 0;
\r
2020 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2022 while ( ports[ nChannels ] ) nChannels++;
\r
2024 info.outputChannels = nChannels;
\r
2027 // Jack "output ports" equal RtAudio input channels.
\r
2029 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2031 while ( ports[ nChannels ] ) nChannels++;
\r
2033 info.inputChannels = nChannels;
\r
2036 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2037 jack_client_close(client);
\r
2038 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2039 error( RtAudioError::WARNING );
\r
2043 // If device opens for both playback and capture, we determine the channels.
\r
2044 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2045 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2047 // Jack always uses 32-bit floats.
\r
2048 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2050 // Jack doesn't provide default devices so we'll use the first available one.
\r
2051 if ( device == 0 && info.outputChannels > 0 )
\r
2052 info.isDefaultOutput = true;
\r
2053 if ( device == 0 && info.inputChannels > 0 )
\r
2054 info.isDefaultInput = true;
\r
2056 jack_client_close(client);
\r
2057 info.probed = true;
\r
2061 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2063 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2065 RtApiJack *object = (RtApiJack *) info->object;
\r
2066 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2071 // This function will be called by a spawned thread when the Jack
\r
2072 // server signals that it is shutting down. It is necessary to handle
\r
2073 // it this way because the jackShutdown() function must return before
\r
2074 // the jack_deactivate() function (in closeStream()) will return.
\r
2075 static void *jackCloseStream( void *ptr )
\r
2077 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2078 RtApiJack *object = (RtApiJack *) info->object;
\r
2080 object->closeStream();
\r
2082 pthread_exit( NULL );
\r
2084 static void jackShutdown( void *infoPointer )
\r
2086 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2087 RtApiJack *object = (RtApiJack *) info->object;
\r
2089 // Check current stream state. If stopped, then we'll assume this
\r
2090 // was called as a result of a call to RtApiJack::stopStream (the
\r
2091 // deactivation of a client handle causes this function to be called).
\r
2092 // If not, we'll assume the Jack server is shutting down or some
\r
2093 // other problem occurred and we should close the stream.
\r
2094 if ( object->isStreamRunning() == false ) return;
\r
2096 ThreadHandle threadId;
\r
2097 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2098 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2101 static int jackXrun( void *infoPointer )
\r
2103 JackHandle *handle = (JackHandle *) infoPointer;
\r
2105 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2106 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2111 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2112 unsigned int firstChannel, unsigned int sampleRate,
\r
2113 RtAudioFormat format, unsigned int *bufferSize,
\r
2114 RtAudio::StreamOptions *options )
\r
2116 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2118 // Look for jack server and try to become a client (only do once per stream).
\r
2119 jack_client_t *client = 0;
\r
2120 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2121 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2122 jack_status_t *status = NULL;
\r
2123 if ( options && !options->streamName.empty() )
\r
2124 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2126 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2127 if ( client == 0 ) {
\r
2128 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2129 error( RtAudioError::WARNING );
\r
2134 // The handle must have been created on an earlier pass.
\r
2135 client = handle->client;
\r
2138 const char **ports;
\r
2139 std::string port, previousPort, deviceName;
\r
2140 unsigned int nPorts = 0, nDevices = 0;
\r
2141 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2143 // Parse the port names up to the first colon (:).
\r
2144 size_t iColon = 0;
\r
2146 port = (char *) ports[ nPorts ];
\r
2147 iColon = port.find(":");
\r
2148 if ( iColon != std::string::npos ) {
\r
2149 port = port.substr( 0, iColon );
\r
2150 if ( port != previousPort ) {
\r
2151 if ( nDevices == device ) deviceName = port;
\r
2153 previousPort = port;
\r
2156 } while ( ports[++nPorts] );
\r
2160 if ( device >= nDevices ) {
\r
2161 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2165 // Count the available ports containing the client name as device
\r
2166 // channels. Jack "input ports" equal RtAudio output channels.
\r
2167 unsigned int nChannels = 0;
\r
2168 unsigned long flag = JackPortIsInput;
\r
2169 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2170 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2172 while ( ports[ nChannels ] ) nChannels++;
\r
2176 // Compare the jack ports for specified client to the requested number of channels.
\r
2177 if ( nChannels < (channels + firstChannel) ) {
\r
2178 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2179 errorText_ = errorStream_.str();
\r
2183 // Check the jack server sample rate.
\r
2184 unsigned int jackRate = jack_get_sample_rate( client );
\r
2185 if ( sampleRate != jackRate ) {
\r
2186 jack_client_close( client );
\r
2187 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2188 errorText_ = errorStream_.str();
\r
2191 stream_.sampleRate = jackRate;
\r
2193 // Get the latency of the JACK port.
\r
2194 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2195 if ( ports[ firstChannel ] ) {
\r
2196 // Added by Ge Wang
\r
2197 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2198 // the range (usually the min and max are equal)
\r
2199 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2200 // get the latency range
\r
2201 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2202 // be optimistic, use the min!
\r
2203 stream_.latency[mode] = latrange.min;
\r
2204 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2208 // The jack server always uses 32-bit floating-point data.
\r
2209 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2210 stream_.userFormat = format;
\r
2212 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2213 else stream_.userInterleaved = true;
\r
2215 // Jack always uses non-interleaved buffers.
\r
2216 stream_.deviceInterleaved[mode] = false;
\r
2218 // Jack always provides host byte-ordered data.
\r
2219 stream_.doByteSwap[mode] = false;
\r
2221 // Get the buffer size. The buffer size and number of buffers
\r
2222 // (periods) is set when the jack server is started.
\r
2223 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2224 *bufferSize = stream_.bufferSize;
\r
2226 stream_.nDeviceChannels[mode] = channels;
\r
2227 stream_.nUserChannels[mode] = channels;
\r
2229 // Set flags for buffer conversion.
\r
2230 stream_.doConvertBuffer[mode] = false;
\r
2231 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2232 stream_.doConvertBuffer[mode] = true;
\r
2233 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2234 stream_.nUserChannels[mode] > 1 )
\r
2235 stream_.doConvertBuffer[mode] = true;
\r
2237 // Allocate our JackHandle structure for the stream.
\r
2238 if ( handle == 0 ) {
\r
2240 handle = new JackHandle;
\r
2242 catch ( std::bad_alloc& ) {
\r
2243 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2247 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2248 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2251 stream_.apiHandle = (void *) handle;
\r
2252 handle->client = client;
\r
2254 handle->deviceName[mode] = deviceName;
\r
2256 // Allocate necessary internal buffers.
\r
2257 unsigned long bufferBytes;
\r
2258 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2259 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2260 if ( stream_.userBuffer[mode] == NULL ) {
\r
2261 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2265 if ( stream_.doConvertBuffer[mode] ) {
\r
2267 bool makeBuffer = true;
\r
2268 if ( mode == OUTPUT )
\r
2269 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2270 else { // mode == INPUT
\r
2271 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2272 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2273 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2274 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2278 if ( makeBuffer ) {
\r
2279 bufferBytes *= *bufferSize;
\r
2280 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2281 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2282 if ( stream_.deviceBuffer == NULL ) {
\r
2283 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2289 // Allocate memory for the Jack ports (channels) identifiers.
\r
2290 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2291 if ( handle->ports[mode] == NULL ) {
\r
2292 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2296 stream_.device[mode] = device;
\r
2297 stream_.channelOffset[mode] = firstChannel;
\r
2298 stream_.state = STREAM_STOPPED;
\r
2299 stream_.callbackInfo.object = (void *) this;
\r
2301 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2302 // We had already set up the stream for output.
\r
2303 stream_.mode = DUPLEX;
\r
2305 stream_.mode = mode;
\r
2306 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2307 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2308 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2311 // Register our ports.
\r
2313 if ( mode == OUTPUT ) {
\r
2314 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2315 snprintf( label, 64, "outport %d", i );
\r
2316 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2317 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2321 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2322 snprintf( label, 64, "inport %d", i );
\r
2323 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2324 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2328 // Setup the buffer conversion information structure. We don't use
\r
2329 // buffers to do channel offsets, so we override that parameter
\r
2331 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2337 pthread_cond_destroy( &handle->condition );
\r
2338 jack_client_close( handle->client );
\r
2340 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2341 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2344 stream_.apiHandle = 0;
\r
2347 for ( int i=0; i<2; i++ ) {
\r
2348 if ( stream_.userBuffer[i] ) {
\r
2349 free( stream_.userBuffer[i] );
\r
2350 stream_.userBuffer[i] = 0;
\r
2354 if ( stream_.deviceBuffer ) {
\r
2355 free( stream_.deviceBuffer );
\r
2356 stream_.deviceBuffer = 0;
\r
2362 void RtApiJack :: closeStream( void )
\r
2364 if ( stream_.state == STREAM_CLOSED ) {
\r
2365 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2366 error( RtAudioError::WARNING );
\r
2370 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2373 if ( stream_.state == STREAM_RUNNING )
\r
2374 jack_deactivate( handle->client );
\r
2376 jack_client_close( handle->client );
\r
2380 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2381 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2382 pthread_cond_destroy( &handle->condition );
\r
2384 stream_.apiHandle = 0;
\r
2387 for ( int i=0; i<2; i++ ) {
\r
2388 if ( stream_.userBuffer[i] ) {
\r
2389 free( stream_.userBuffer[i] );
\r
2390 stream_.userBuffer[i] = 0;
\r
2394 if ( stream_.deviceBuffer ) {
\r
2395 free( stream_.deviceBuffer );
\r
2396 stream_.deviceBuffer = 0;
\r
2399 stream_.mode = UNINITIALIZED;
\r
2400 stream_.state = STREAM_CLOSED;
\r
2403 void RtApiJack :: startStream( void )
\r
2406 if ( stream_.state == STREAM_RUNNING ) {
\r
2407 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2408 error( RtAudioError::WARNING );
\r
2412 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2413 int result = jack_activate( handle->client );
\r
2415 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2419 const char **ports;
\r
2421 // Get the list of available ports.
\r
2422 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2424 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2425 if ( ports == NULL) {
\r
2426 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2430 // Now make the port connections. Since RtAudio wasn't designed to
\r
2431 // allow the user to select particular channels of a device, we'll
\r
2432 // just open the first "nChannels" ports with offset.
\r
2433 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2435 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2436 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2439 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2446 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2448 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2449 if ( ports == NULL) {
\r
2450 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2454 // Now make the port connections. See note above.
\r
2455 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2457 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2458 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2461 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2468 handle->drainCounter = 0;
\r
2469 handle->internalDrain = false;
\r
2470 stream_.state = STREAM_RUNNING;
\r
2473 if ( result == 0 ) return;
\r
2474 error( RtAudioError::SYSTEM_ERROR );
\r
2477 void RtApiJack :: stopStream( void )
\r
2480 if ( stream_.state == STREAM_STOPPED ) {
\r
2481 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2482 error( RtAudioError::WARNING );
\r
2486 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2487 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2489 if ( handle->drainCounter == 0 ) {
\r
2490 handle->drainCounter = 2;
\r
2491 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2495 jack_deactivate( handle->client );
\r
2496 stream_.state = STREAM_STOPPED;
\r
2499 void RtApiJack :: abortStream( void )
\r
2502 if ( stream_.state == STREAM_STOPPED ) {
\r
2503 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2504 error( RtAudioError::WARNING );
\r
2508 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2509 handle->drainCounter = 2;
\r
2514 // This function will be called by a spawned thread when the user
\r
2515 // callback function signals that the stream should be stopped or
\r
2516 // aborted. It is necessary to handle it this way because the
\r
2517 // callbackEvent() function must return before the jack_deactivate()
\r
2518 // function will return.
\r
2519 static void *jackStopStream( void *ptr )
\r
2521 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2522 RtApiJack *object = (RtApiJack *) info->object;
\r
2524 object->stopStream();
\r
2525 pthread_exit( NULL );
\r
2528 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2530 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2531 if ( stream_.state == STREAM_CLOSED ) {
\r
2532 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2533 error( RtAudioError::WARNING );
\r
2536 if ( stream_.bufferSize != nframes ) {
\r
2537 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2538 error( RtAudioError::WARNING );
\r
2542 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2543 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2545 // Check if we were draining the stream and signal is finished.
\r
2546 if ( handle->drainCounter > 3 ) {
\r
2547 ThreadHandle threadId;
\r
2549 stream_.state = STREAM_STOPPING;
\r
2550 if ( handle->internalDrain == true )
\r
2551 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2553 pthread_cond_signal( &handle->condition );
\r
2557 // Invoke user callback first, to get fresh output data.
\r
2558 if ( handle->drainCounter == 0 ) {
\r
2559 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2560 double streamTime = getStreamTime();
\r
2561 RtAudioStreamStatus status = 0;
\r
2562 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2563 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2564 handle->xrun[0] = false;
\r
2566 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2567 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2568 handle->xrun[1] = false;
\r
2570 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2571 stream_.bufferSize, streamTime, status, info->userData );
\r
2572 if ( cbReturnValue == 2 ) {
\r
2573 stream_.state = STREAM_STOPPING;
\r
2574 handle->drainCounter = 2;
\r
2576 pthread_create( &id, NULL, jackStopStream, info );
\r
2579 else if ( cbReturnValue == 1 ) {
\r
2580 handle->drainCounter = 1;
\r
2581 handle->internalDrain = true;
\r
2585 jack_default_audio_sample_t *jackbuffer;
\r
2586 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2587 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2589 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2591 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2592 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2593 memset( jackbuffer, 0, bufferBytes );
\r
2597 else if ( stream_.doConvertBuffer[0] ) {
\r
2599 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2601 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2602 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2603 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2606 else { // no buffer conversion
\r
2607 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2608 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2609 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2614 // Don't bother draining input
\r
2615 if ( handle->drainCounter ) {
\r
2616 handle->drainCounter++;
\r
2620 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2622 if ( stream_.doConvertBuffer[1] ) {
\r
2623 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2624 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2625 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2627 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2629 else { // no buffer conversion
\r
2630 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2631 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2632 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2638 RtApi::tickStreamTime();
\r
2641 //******************** End of __UNIX_JACK__ *********************//
\r
2644 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2646 // The ASIO API is designed around a callback scheme, so this
\r
2647 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2648 // Jack. The primary constraint with ASIO is that it only allows
\r
2649 // access to a single driver at a time. Thus, it is not possible to
\r
2650 // have more than one simultaneous RtAudio stream.
\r
2652 // This implementation also requires a number of external ASIO files
\r
2653 // and a few global variables. The ASIO callback scheme does not
\r
2654 // allow for the passing of user data, so we must create a global
\r
2655 // pointer to our callbackInfo structure.
\r
2657 // On unix systems, we make use of a pthread condition variable.
\r
2658 // Since there is no equivalent in Windows, I hacked something based
\r
2659 // on information found in
\r
2660 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2662 #include "asiosys.h"
\r
2664 #include "iasiothiscallresolver.h"
\r
2665 #include "asiodrivers.h"
\r
2668 static AsioDrivers drivers;
\r
2669 static ASIOCallbacks asioCallbacks;
\r
2670 static ASIODriverInfo driverInfo;
\r
2671 static CallbackInfo *asioCallbackInfo;
\r
2672 static bool asioXRun;
\r
2674 struct AsioHandle {
\r
2675 int drainCounter; // Tracks callback counts when draining
\r
2676 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2677 ASIOBufferInfo *bufferInfos;
\r
2681 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2684 // Function declarations (definitions at end of section)
\r
2685 static const char* getAsioErrorString( ASIOError result );
\r
2686 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2687 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2689 RtApiAsio :: RtApiAsio()
\r
2691 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2692 // CoInitialize beforehand, but it must be for appartment threading
\r
2693 // (in which case, CoInitilialize will return S_FALSE here).
\r
2694 coInitialized_ = false;
\r
2695 HRESULT hr = CoInitialize( NULL );
\r
2696 if ( FAILED(hr) ) {
\r
2697 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2698 error( RtAudioError::WARNING );
\r
2700 coInitialized_ = true;
\r
2702 drivers.removeCurrentDriver();
\r
2703 driverInfo.asioVersion = 2;
\r
2705 // See note in DirectSound implementation about GetDesktopWindow().
\r
2706 driverInfo.sysRef = GetForegroundWindow();
\r
2709 RtApiAsio :: ~RtApiAsio()
\r
2711 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2712 if ( coInitialized_ ) CoUninitialize();
\r
2715 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2717 return (unsigned int) drivers.asioGetNumDev();
\r
2720 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2722 RtAudio::DeviceInfo info;
\r
2723 info.probed = false;
\r
2726 unsigned int nDevices = getDeviceCount();
\r
2727 if ( nDevices == 0 ) {
\r
2728 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2729 error( RtAudioError::INVALID_USE );
\r
2733 if ( device >= nDevices ) {
\r
2734 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2735 error( RtAudioError::INVALID_USE );
\r
2739 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2740 if ( stream_.state != STREAM_CLOSED ) {
\r
2741 if ( device >= devices_.size() ) {
\r
2742 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2743 error( RtAudioError::WARNING );
\r
2746 return devices_[ device ];
\r
2749 char driverName[32];
\r
2750 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2751 if ( result != ASE_OK ) {
\r
2752 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2753 errorText_ = errorStream_.str();
\r
2754 error( RtAudioError::WARNING );
\r
2758 info.name = driverName;
\r
2760 if ( !drivers.loadDriver( driverName ) ) {
\r
2761 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2762 errorText_ = errorStream_.str();
\r
2763 error( RtAudioError::WARNING );
\r
2767 result = ASIOInit( &driverInfo );
\r
2768 if ( result != ASE_OK ) {
\r
2769 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2770 errorText_ = errorStream_.str();
\r
2771 error( RtAudioError::WARNING );
\r
2775 // Determine the device channel information.
\r
2776 long inputChannels, outputChannels;
\r
2777 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2778 if ( result != ASE_OK ) {
\r
2779 drivers.removeCurrentDriver();
\r
2780 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2781 errorText_ = errorStream_.str();
\r
2782 error( RtAudioError::WARNING );
\r
2786 info.outputChannels = outputChannels;
\r
2787 info.inputChannels = inputChannels;
\r
2788 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2789 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2791 // Determine the supported sample rates.
\r
2792 info.sampleRates.clear();
\r
2793 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2794 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2795 if ( result == ASE_OK ) {
\r
2796 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2798 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
2799 info.preferredSampleRate = SAMPLE_RATES[i];
\r
2803 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2804 ASIOChannelInfo channelInfo;
\r
2805 channelInfo.channel = 0;
\r
2806 channelInfo.isInput = true;
\r
2807 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2808 result = ASIOGetChannelInfo( &channelInfo );
\r
2809 if ( result != ASE_OK ) {
\r
2810 drivers.removeCurrentDriver();
\r
2811 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2812 errorText_ = errorStream_.str();
\r
2813 error( RtAudioError::WARNING );
\r
2817 info.nativeFormats = 0;
\r
2818 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2819 info.nativeFormats |= RTAUDIO_SINT16;
\r
2820 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2821 info.nativeFormats |= RTAUDIO_SINT32;
\r
2822 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2823 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2824 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2825 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2826 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2827 info.nativeFormats |= RTAUDIO_SINT24;
\r
2829 if ( info.outputChannels > 0 )
\r
2830 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2831 if ( info.inputChannels > 0 )
\r
2832 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2834 info.probed = true;
\r
2835 drivers.removeCurrentDriver();
\r
2839 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2841 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2842 object->callbackEvent( index );
\r
2845 void RtApiAsio :: saveDeviceInfo( void )
\r
2849 unsigned int nDevices = getDeviceCount();
\r
2850 devices_.resize( nDevices );
\r
2851 for ( unsigned int i=0; i<nDevices; i++ )
\r
2852 devices_[i] = getDeviceInfo( i );
\r
2855 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2856 unsigned int firstChannel, unsigned int sampleRate,
\r
2857 RtAudioFormat format, unsigned int *bufferSize,
\r
2858 RtAudio::StreamOptions *options )
\r
2859 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
2861 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
\r
2863 // For ASIO, a duplex stream MUST use the same driver.
\r
2864 if ( isDuplexInput && stream_.device[0] != device ) {
\r
2865 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2869 char driverName[32];
\r
2870 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2871 if ( result != ASE_OK ) {
\r
2872 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2873 errorText_ = errorStream_.str();
\r
2877 // Only load the driver once for duplex stream.
\r
2878 if ( !isDuplexInput ) {
\r
2879 // The getDeviceInfo() function will not work when a stream is open
\r
2880 // because ASIO does not allow multiple devices to run at the same
\r
2881 // time. Thus, we'll probe the system before opening a stream and
\r
2882 // save the results for use by getDeviceInfo().
\r
2883 this->saveDeviceInfo();
\r
2885 if ( !drivers.loadDriver( driverName ) ) {
\r
2886 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2887 errorText_ = errorStream_.str();
\r
2891 result = ASIOInit( &driverInfo );
\r
2892 if ( result != ASE_OK ) {
\r
2893 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2894 errorText_ = errorStream_.str();
\r
2899 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
\r
2900 bool buffersAllocated = false;
\r
2901 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2902 unsigned int nChannels;
\r
2905 // Check the device channel count.
\r
2906 long inputChannels, outputChannels;
\r
2907 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2908 if ( result != ASE_OK ) {
\r
2909 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2910 errorText_ = errorStream_.str();
\r
2914 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2915 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2916 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2917 errorText_ = errorStream_.str();
\r
2920 stream_.nDeviceChannels[mode] = channels;
\r
2921 stream_.nUserChannels[mode] = channels;
\r
2922 stream_.channelOffset[mode] = firstChannel;
\r
2924 // Verify the sample rate is supported.
\r
2925 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2926 if ( result != ASE_OK ) {
\r
2927 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2928 errorText_ = errorStream_.str();
\r
2932 // Get the current sample rate
\r
2933 ASIOSampleRate currentRate;
\r
2934 result = ASIOGetSampleRate( ¤tRate );
\r
2935 if ( result != ASE_OK ) {
\r
2936 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2937 errorText_ = errorStream_.str();
\r
2941 // Set the sample rate only if necessary
\r
2942 if ( currentRate != sampleRate ) {
\r
2943 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2944 if ( result != ASE_OK ) {
\r
2945 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2946 errorText_ = errorStream_.str();
\r
2951 // Determine the driver data type.
\r
2952 ASIOChannelInfo channelInfo;
\r
2953 channelInfo.channel = 0;
\r
2954 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2955 else channelInfo.isInput = true;
\r
2956 result = ASIOGetChannelInfo( &channelInfo );
\r
2957 if ( result != ASE_OK ) {
\r
2958 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2959 errorText_ = errorStream_.str();
\r
2963 // Assuming WINDOWS host is always little-endian.
\r
2964 stream_.doByteSwap[mode] = false;
\r
2965 stream_.userFormat = format;
\r
2966 stream_.deviceFormat[mode] = 0;
\r
2967 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2968 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2969 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2971 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2972 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2973 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2975 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2976 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2977 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2979 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2980 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2981 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2983 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2984 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2985 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2988 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2989 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2990 errorText_ = errorStream_.str();
\r
2994 // Set the buffer size. For a duplex stream, this will end up
\r
2995 // setting the buffer size based on the input constraints, which
\r
2997 long minSize, maxSize, preferSize, granularity;
\r
2998 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2999 if ( result != ASE_OK ) {
\r
3000 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
3001 errorText_ = errorStream_.str();
\r
3005 if ( isDuplexInput ) {
\r
3006 // When this is the duplex input (output was opened before), then we have to use the same
\r
3007 // buffersize as the output, because it might use the preferred buffer size, which most
\r
3008 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
\r
3009 // So instead of throwing an error, make them equal. The caller uses the reference
\r
3010 // to the "bufferSize" param as usual to set up processing buffers.
\r
3012 *bufferSize = stream_.bufferSize;
\r
3015 if ( *bufferSize == 0 ) *bufferSize = preferSize;
\r
3016 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3017 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3018 else if ( granularity == -1 ) {
\r
3019 // Make sure bufferSize is a power of two.
\r
3020 int log2_of_min_size = 0;
\r
3021 int log2_of_max_size = 0;
\r
3023 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
3024 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
3025 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3028 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3029 int min_delta_num = log2_of_min_size;
\r
3031 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3032 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3033 if (current_delta < min_delta) {
\r
3034 min_delta = current_delta;
\r
3035 min_delta_num = i;
\r
3039 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3040 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3041 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3043 else if ( granularity != 0 ) {
\r
3044 // Set to an even multiple of granularity, rounding up.
\r
3045 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3050 // we don't use it anymore, see above!
\r
3051 // Just left it here for the case...
\r
3052 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
\r
3053 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3058 stream_.bufferSize = *bufferSize;
\r
3059 stream_.nBuffers = 2;
\r
3061 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3062 else stream_.userInterleaved = true;
\r
3064 // ASIO always uses non-interleaved buffers.
\r
3065 stream_.deviceInterleaved[mode] = false;
\r
3067 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3068 if ( handle == 0 ) {
\r
3070 handle = new AsioHandle;
\r
3072 catch ( std::bad_alloc& ) {
\r
3073 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3076 handle->bufferInfos = 0;
\r
3078 // Create a manual-reset event.
\r
3079 handle->condition = CreateEvent( NULL, // no security
\r
3080 TRUE, // manual-reset
\r
3081 FALSE, // non-signaled initially
\r
3082 NULL ); // unnamed
\r
3083 stream_.apiHandle = (void *) handle;
\r
3086 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3087 // and output separately, we'll have to dispose of previously
\r
3088 // created output buffers for a duplex stream.
\r
3089 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3090 ASIODisposeBuffers();
\r
3091 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3094 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3096 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3097 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3098 if ( handle->bufferInfos == NULL ) {
\r
3099 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3100 errorText_ = errorStream_.str();
\r
3104 ASIOBufferInfo *infos;
\r
3105 infos = handle->bufferInfos;
\r
3106 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3107 infos->isInput = ASIOFalse;
\r
3108 infos->channelNum = i + stream_.channelOffset[0];
\r
3109 infos->buffers[0] = infos->buffers[1] = 0;
\r
3111 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3112 infos->isInput = ASIOTrue;
\r
3113 infos->channelNum = i + stream_.channelOffset[1];
\r
3114 infos->buffers[0] = infos->buffers[1] = 0;
\r
3117 // prepare for callbacks
\r
3118 stream_.sampleRate = sampleRate;
\r
3119 stream_.device[mode] = device;
\r
3120 stream_.mode = isDuplexInput ? DUPLEX : mode;
\r
3122 // store this class instance before registering callbacks, that are going to use it
\r
3123 asioCallbackInfo = &stream_.callbackInfo;
\r
3124 stream_.callbackInfo.object = (void *) this;
\r
3126 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3127 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3128 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3129 asioCallbacks.asioMessage = &asioMessages;
\r
3130 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3131 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3132 if ( result != ASE_OK ) {
\r
3133 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3134 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3135 // in that case, let's be naïve and try that instead
\r
3136 *bufferSize = preferSize;
\r
3137 stream_.bufferSize = *bufferSize;
\r
3138 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3141 if ( result != ASE_OK ) {
\r
3142 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3143 errorText_ = errorStream_.str();
\r
3146 buffersAllocated = true;
\r
3147 stream_.state = STREAM_STOPPED;
\r
3149 // Set flags for buffer conversion.
\r
3150 stream_.doConvertBuffer[mode] = false;
\r
3151 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3152 stream_.doConvertBuffer[mode] = true;
\r
3153 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3154 stream_.nUserChannels[mode] > 1 )
\r
3155 stream_.doConvertBuffer[mode] = true;
\r
3157 // Allocate necessary internal buffers
\r
3158 unsigned long bufferBytes;
\r
3159 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3160 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3161 if ( stream_.userBuffer[mode] == NULL ) {
\r
3162 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3166 if ( stream_.doConvertBuffer[mode] ) {
\r
3168 bool makeBuffer = true;
\r
3169 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3170 if ( isDuplexInput && stream_.deviceBuffer ) {
\r
3171 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3172 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3175 if ( makeBuffer ) {
\r
3176 bufferBytes *= *bufferSize;
\r
3177 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3178 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3179 if ( stream_.deviceBuffer == NULL ) {
\r
3180 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3186 // Determine device latencies
\r
3187 long inputLatency, outputLatency;
\r
3188 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3189 if ( result != ASE_OK ) {
\r
3190 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3191 errorText_ = errorStream_.str();
\r
3192 error( RtAudioError::WARNING); // warn but don't fail
\r
3195 stream_.latency[0] = outputLatency;
\r
3196 stream_.latency[1] = inputLatency;
\r
3199 // Setup the buffer conversion information structure. We don't use
\r
3200 // buffers to do channel offsets, so we override that parameter
\r
3202 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3207 if ( !isDuplexInput ) {
\r
3208 // the cleanup for error in the duplex input, is done by RtApi::openStream
\r
3209 // So we clean up for single channel only
\r
3211 if ( buffersAllocated )
\r
3212 ASIODisposeBuffers();
\r
3214 drivers.removeCurrentDriver();
\r
3217 CloseHandle( handle->condition );
\r
3218 if ( handle->bufferInfos )
\r
3219 free( handle->bufferInfos );
\r
3222 stream_.apiHandle = 0;
\r
3226 if ( stream_.userBuffer[mode] ) {
\r
3227 free( stream_.userBuffer[mode] );
\r
3228 stream_.userBuffer[mode] = 0;
\r
3231 if ( stream_.deviceBuffer ) {
\r
3232 free( stream_.deviceBuffer );
\r
3233 stream_.deviceBuffer = 0;
\r
3238 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
\r
3240 void RtApiAsio :: closeStream()
\r
3242 if ( stream_.state == STREAM_CLOSED ) {
\r
3243 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3244 error( RtAudioError::WARNING );
\r
3248 if ( stream_.state == STREAM_RUNNING ) {
\r
3249 stream_.state = STREAM_STOPPED;
\r
3252 ASIODisposeBuffers();
\r
3253 drivers.removeCurrentDriver();
\r
3255 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3257 CloseHandle( handle->condition );
\r
3258 if ( handle->bufferInfos )
\r
3259 free( handle->bufferInfos );
\r
3261 stream_.apiHandle = 0;
\r
3264 for ( int i=0; i<2; i++ ) {
\r
3265 if ( stream_.userBuffer[i] ) {
\r
3266 free( stream_.userBuffer[i] );
\r
3267 stream_.userBuffer[i] = 0;
\r
3271 if ( stream_.deviceBuffer ) {
\r
3272 free( stream_.deviceBuffer );
\r
3273 stream_.deviceBuffer = 0;
\r
3276 stream_.mode = UNINITIALIZED;
\r
3277 stream_.state = STREAM_CLOSED;
\r
// Cleared by startStream(); guards against redundant stop-thread activity.
bool stopThreadCalled = false;
\r
3282 void RtApiAsio :: startStream()
\r
3285 if ( stream_.state == STREAM_RUNNING ) {
\r
3286 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3287 error( RtAudioError::WARNING );
\r
3291 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3292 ASIOError result = ASIOStart();
\r
3293 if ( result != ASE_OK ) {
\r
3294 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3295 errorText_ = errorStream_.str();
\r
3299 handle->drainCounter = 0;
\r
3300 handle->internalDrain = false;
\r
3301 ResetEvent( handle->condition );
\r
3302 stream_.state = STREAM_RUNNING;
\r
3306 stopThreadCalled = false;
\r
3308 if ( result == ASE_OK ) return;
\r
3309 error( RtAudioError::SYSTEM_ERROR );
\r
3312 void RtApiAsio :: stopStream()
\r
3315 if ( stream_.state == STREAM_STOPPED ) {
\r
3316 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3317 error( RtAudioError::WARNING );
\r
3321 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3322 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3323 if ( handle->drainCounter == 0 ) {
\r
3324 handle->drainCounter = 2;
\r
3325 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3329 stream_.state = STREAM_STOPPED;
\r
3331 ASIOError result = ASIOStop();
\r
3332 if ( result != ASE_OK ) {
\r
3333 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3334 errorText_ = errorStream_.str();
\r
3337 if ( result == ASE_OK ) return;
\r
3338 error( RtAudioError::SYSTEM_ERROR );
\r
3341 void RtApiAsio :: abortStream()
\r
3344 if ( stream_.state == STREAM_STOPPED ) {
\r
3345 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3346 error( RtAudioError::WARNING );
\r
3350 // The following lines were commented-out because some behavior was
\r
3351 // noted where the device buffers need to be zeroed to avoid
\r
3352 // continuing sound, even when the device buffers are completely
\r
3353 // disposed. So now, calling abort is the same as calling stop.
\r
3354 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3355 // handle->drainCounter = 2;
\r
3359 // This function will be called by a spawned thread when the user
\r
3360 // callback function signals that the stream should be stopped or
\r
3361 // aborted. It is necessary to handle it this way because the
\r
3362 // callbackEvent() function must return before the ASIOStop()
\r
3363 // function will return.
\r
3364 static unsigned __stdcall asioStopStream( void *ptr )
\r
3366 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3367 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3369 object->stopStream();
\r
3370 _endthreadex( 0 );
\r
3374 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3376 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3377 if ( stream_.state == STREAM_CLOSED ) {
\r
3378 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3379 error( RtAudioError::WARNING );
\r
3383 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3384 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3386 // Check if we were draining the stream and signal if finished.
\r
3387 if ( handle->drainCounter > 3 ) {
\r
3389 stream_.state = STREAM_STOPPING;
\r
3390 if ( handle->internalDrain == false )
\r
3391 SetEvent( handle->condition );
\r
3392 else { // spawn a thread to stop the stream
\r
3393 unsigned threadId;
\r
3394 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3395 &stream_.callbackInfo, 0, &threadId );
\r
3400 // Invoke user callback to get fresh output data UNLESS we are
\r
3401 // draining stream.
\r
3402 if ( handle->drainCounter == 0 ) {
\r
3403 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3404 double streamTime = getStreamTime();
\r
3405 RtAudioStreamStatus status = 0;
\r
3406 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3407 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3410 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3411 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3414 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3415 stream_.bufferSize, streamTime, status, info->userData );
\r
3416 if ( cbReturnValue == 2 ) {
\r
3417 stream_.state = STREAM_STOPPING;
\r
3418 handle->drainCounter = 2;
\r
3419 unsigned threadId;
\r
3420 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3421 &stream_.callbackInfo, 0, &threadId );
\r
3424 else if ( cbReturnValue == 1 ) {
\r
3425 handle->drainCounter = 1;
\r
3426 handle->internalDrain = true;
\r
3430 unsigned int nChannels, bufferBytes, i, j;
\r
3431 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3432 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3434 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3436 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3438 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3439 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3440 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3444 else if ( stream_.doConvertBuffer[0] ) {
\r
3446 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3447 if ( stream_.doByteSwap[0] )
\r
3448 byteSwapBuffer( stream_.deviceBuffer,
\r
3449 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3450 stream_.deviceFormat[0] );
\r
3452 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3453 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3454 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3455 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3461 if ( stream_.doByteSwap[0] )
\r
3462 byteSwapBuffer( stream_.userBuffer[0],
\r
3463 stream_.bufferSize * stream_.nUserChannels[0],
\r
3464 stream_.userFormat );
\r
3466 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3467 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3468 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3469 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3475 // Don't bother draining input
\r
3476 if ( handle->drainCounter ) {
\r
3477 handle->drainCounter++;
\r
3481 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3483 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3485 if (stream_.doConvertBuffer[1]) {
\r
3487 // Always interleave ASIO input data.
\r
3488 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3489 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3490 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3491 handle->bufferInfos[i].buffers[bufferIndex],
\r
3495 if ( stream_.doByteSwap[1] )
\r
3496 byteSwapBuffer( stream_.deviceBuffer,
\r
3497 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3498 stream_.deviceFormat[1] );
\r
3499 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3503 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3504 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3505 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3506 handle->bufferInfos[i].buffers[bufferIndex],
\r
3511 if ( stream_.doByteSwap[1] )
\r
3512 byteSwapBuffer( stream_.userBuffer[1],
\r
3513 stream_.bufferSize * stream_.nUserChannels[1],
\r
3514 stream_.userFormat );
\r
3519 // The following call was suggested by Malte Clasen. While the API
\r
3520 // documentation indicates it should not be required, some device
\r
3521 // drivers apparently do not function correctly without it.
\r
3522 ASIOOutputReady();
\r
3524 RtApi::tickStreamTime();
\r
3528 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3530 // The ASIO documentation says that this usually only happens during
\r
3531 // external sync. Audio processing is not stopped by the driver,
\r
3532 // actual sample rate might not have even changed, maybe only the
\r
3533 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3536 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3538 object->stopStream();
\r
3540 catch ( RtAudioError &exception ) {
\r
3541 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3545 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3548 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3552 switch( selector ) {
\r
3553 case kAsioSelectorSupported:
\r
3554 if ( value == kAsioResetRequest
\r
3555 || value == kAsioEngineVersion
\r
3556 || value == kAsioResyncRequest
\r
3557 || value == kAsioLatenciesChanged
\r
3558 // The following three were added for ASIO 2.0, you don't
\r
3559 // necessarily have to support them.
\r
3560 || value == kAsioSupportsTimeInfo
\r
3561 || value == kAsioSupportsTimeCode
\r
3562 || value == kAsioSupportsInputMonitor)
\r
3565 case kAsioResetRequest:
\r
3566 // Defer the task and perform the reset of the driver during the
\r
3567 // next "safe" situation. You cannot reset the driver right now,
\r
3568 // as this code is called from the driver. Reset the driver is
\r
3569 // done by completely destruct is. I.e. ASIOStop(),
\r
3570 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3572 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3575 case kAsioResyncRequest:
\r
3576 // This informs the application that the driver encountered some
\r
3577 // non-fatal data loss. It is used for synchronization purposes
\r
3578 // of different media. Added mainly to work around the Win16Mutex
\r
3579 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3580 // which could lose data because the Mutex was held too long by
\r
3581 // another thread. However a driver can issue it in other
\r
3582 // situations, too.
\r
3583 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3587 case kAsioLatenciesChanged:
\r
3588 // This will inform the host application that the drivers were
\r
3589 // latencies changed. Beware, it this does not mean that the
\r
3590 // buffer sizes have changed! You might need to update internal
\r
3592 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3595 case kAsioEngineVersion:
\r
3596 // Return the supported ASIO version of the host application. If
\r
3597 // a host application does not implement this selector, ASIO 1.0
\r
3598 // is assumed by the driver.
\r
3601 case kAsioSupportsTimeInfo:
\r
3602 // Informs the driver whether the
\r
3603 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3604 // For compatibility with ASIO 1.0 drivers the host application
\r
3605 // should always support the "old" bufferSwitch method, too.
\r
3608 case kAsioSupportsTimeCode:
\r
3609 // Informs the driver whether application is interested in time
\r
3610 // code info. If an application does not need to know about time
\r
3611 // code, the driver has less work to do.
\r
3618 static const char* getAsioErrorString( ASIOError result )
\r
3623 const char*message;
\r
3626 static const Messages m[] =
\r
3628 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3629 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3630 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3631 { ASE_InvalidMode, "Invalid mode." },
\r
3632 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3633 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3634 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3637 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3638 if ( m[i].value == result ) return m[i].message;
\r
3640 return "Unknown error.";
\r
3643 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3647 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3649 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3650 // - Introduces support for the Windows WASAPI API
\r
3651 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3652 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3653 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3658 #include <audioclient.h>
\r
3660 #include <mmdeviceapi.h>
\r
3661 #include <functiondiscoverykeys_devpkey.h>
\r
3663 //=============================================================================
\r
// Release a COM interface pointer and null it; safe to invoke on NULL.
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3672 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3674 //-----------------------------------------------------------------------------
\r
3676 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3677 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3678 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3679 // provide intermediate storage for read / write synchronization.
\r
3680 class WasapiBuffer
\r
3684 : buffer_( NULL ),
\r
3693 // sets the length of the internal ring buffer
\r
3694 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3697 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3699 bufferSize_ = bufferSize;
\r
3704 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3705 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3707 if ( !buffer || // incoming buffer is NULL
\r
3708 bufferSize == 0 || // incoming buffer has no data
\r
3709 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3714 unsigned int relOutIndex = outIndex_;
\r
3715 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3716 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3717 relOutIndex += bufferSize_;
\r
3720 // "in" index can end on the "out" index but cannot begin at it
\r
3721 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3722 return false; // not enough space between "in" index and "out" index
\r
3725 // copy buffer from external to internal
\r
3726 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3727 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3728 int fromInSize = bufferSize - fromZeroSize;
\r
3732 case RTAUDIO_SINT8:
\r
3733 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3734 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3736 case RTAUDIO_SINT16:
\r
3737 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3738 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3740 case RTAUDIO_SINT24:
\r
3741 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3742 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3744 case RTAUDIO_SINT32:
\r
3745 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3746 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3748 case RTAUDIO_FLOAT32:
\r
3749 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3750 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3752 case RTAUDIO_FLOAT64:
\r
3753 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3754 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3758 // update "in" index
\r
3759 inIndex_ += bufferSize;
\r
3760 inIndex_ %= bufferSize_;
\r
3765 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3766 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3768 if ( !buffer || // incoming buffer is NULL
\r
3769 bufferSize == 0 || // incoming buffer has no data
\r
3770 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3775 unsigned int relInIndex = inIndex_;
\r
3776 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3777 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3778 relInIndex += bufferSize_;
\r
3781 // "out" index can begin at and end on the "in" index
\r
3782 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3783 return false; // not enough space between "out" index and "in" index
\r
3786 // copy buffer from internal to external
\r
3787 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3788 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3789 int fromOutSize = bufferSize - fromZeroSize;
\r
3793 case RTAUDIO_SINT8:
\r
3794 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3795 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3797 case RTAUDIO_SINT16:
\r
3798 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3799 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3801 case RTAUDIO_SINT24:
\r
3802 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3803 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3805 case RTAUDIO_SINT32:
\r
3806 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3807 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3809 case RTAUDIO_FLOAT32:
\r
3810 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3811 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3813 case RTAUDIO_FLOAT64:
\r
3814 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3815 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3819 // update "out" index
\r
3820 outIndex_ += bufferSize;
\r
3821 outIndex_ %= bufferSize_;
\r
3828 unsigned int bufferSize_;
\r
3829 unsigned int inIndex_;
\r
3830 unsigned int outIndex_;
\r
3833 //-----------------------------------------------------------------------------
\r
3835 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3836 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3837 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3838 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3839 // one rate and its multiple.
\r
3840 void convertBufferWasapi( char* outBuffer,
\r
3841 const char* inBuffer,
\r
3842 const unsigned int& channelCount,
\r
3843 const unsigned int& inSampleRate,
\r
3844 const unsigned int& outSampleRate,
\r
3845 const unsigned int& inSampleCount,
\r
3846 unsigned int& outSampleCount,
\r
3847 const RtAudioFormat& format )
\r
3849 // calculate the new outSampleCount and relative sampleStep
\r
3850 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3851 float sampleStep = 1.0f / sampleRatio;
\r
3852 float inSampleFraction = 0.0f;
\r
3854 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3856 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3857 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3859 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3863 case RTAUDIO_SINT8:
\r
3864 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3866 case RTAUDIO_SINT16:
\r
3867 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3869 case RTAUDIO_SINT24:
\r
3870 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3872 case RTAUDIO_SINT32:
\r
3873 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3875 case RTAUDIO_FLOAT32:
\r
3876 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3878 case RTAUDIO_FLOAT64:
\r
3879 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3883 // jump to next in sample
\r
3884 inSampleFraction += sampleStep;
\r
3888 //-----------------------------------------------------------------------------
\r
3890 // A structure to hold various information related to the WASAPI implementation.
\r
3891 struct WasapiHandle
\r
3893 IAudioClient* captureAudioClient;
\r
3894 IAudioClient* renderAudioClient;
\r
3895 IAudioCaptureClient* captureClient;
\r
3896 IAudioRenderClient* renderClient;
\r
3897 HANDLE captureEvent;
\r
3898 HANDLE renderEvent;
\r
3901 : captureAudioClient( NULL ),
\r
3902 renderAudioClient( NULL ),
\r
3903 captureClient( NULL ),
\r
3904 renderClient( NULL ),
\r
3905 captureEvent( NULL ),
\r
3906 renderEvent( NULL ) {}
\r
3909 //=============================================================================
\r
3911 RtApiWasapi::RtApiWasapi()
\r
3912 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3914 // WASAPI can run either apartment or multi-threaded
\r
3915 HRESULT hr = CoInitialize( NULL );
\r
3916 if ( !FAILED( hr ) )
\r
3917 coInitialized_ = true;
\r
3919 // Instantiate device enumerator
\r
3920 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3921 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3922 ( void** ) &deviceEnumerator_ );
\r
3924 if ( FAILED( hr ) ) {
\r
3925 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3926 error( RtAudioError::DRIVER_ERROR );
\r
3930 //-----------------------------------------------------------------------------
\r
3932 RtApiWasapi::~RtApiWasapi()
\r
3934 if ( stream_.state != STREAM_CLOSED )
\r
3937 SAFE_RELEASE( deviceEnumerator_ );
\r
3939 // If this object previously called CoInitialize()
\r
3940 if ( coInitialized_ )
\r
3944 //=============================================================================
\r
3946 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3948 unsigned int captureDeviceCount = 0;
\r
3949 unsigned int renderDeviceCount = 0;
\r
3951 IMMDeviceCollection* captureDevices = NULL;
\r
3952 IMMDeviceCollection* renderDevices = NULL;
\r
3954 // Count capture devices
\r
3955 errorText_.clear();
\r
3956 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3957 if ( FAILED( hr ) ) {
\r
3958 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3962 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3963 if ( FAILED( hr ) ) {
\r
3964 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3968 // Count render devices
\r
3969 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3970 if ( FAILED( hr ) ) {
\r
3971 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3975 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3976 if ( FAILED( hr ) ) {
\r
3977 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3982 // release all references
\r
3983 SAFE_RELEASE( captureDevices );
\r
3984 SAFE_RELEASE( renderDevices );
\r
3986 if ( errorText_.empty() )
\r
3987 return captureDeviceCount + renderDeviceCount;
\r
3989 error( RtAudioError::DRIVER_ERROR );
\r
3993 //-----------------------------------------------------------------------------
\r
3995 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3997 RtAudio::DeviceInfo info;
\r
3998 unsigned int captureDeviceCount = 0;
\r
3999 unsigned int renderDeviceCount = 0;
\r
4000 std::string defaultDeviceName;
\r
4001 bool isCaptureDevice = false;
\r
4003 PROPVARIANT deviceNameProp;
\r
4004 PROPVARIANT defaultDeviceNameProp;
\r
4006 IMMDeviceCollection* captureDevices = NULL;
\r
4007 IMMDeviceCollection* renderDevices = NULL;
\r
4008 IMMDevice* devicePtr = NULL;
\r
4009 IMMDevice* defaultDevicePtr = NULL;
\r
4010 IAudioClient* audioClient = NULL;
\r
4011 IPropertyStore* devicePropStore = NULL;
\r
4012 IPropertyStore* defaultDevicePropStore = NULL;
\r
4014 WAVEFORMATEX* deviceFormat = NULL;
\r
4015 WAVEFORMATEX* closestMatchFormat = NULL;
\r
4018 info.probed = false;
\r
4020 // Count capture devices
\r
4021 errorText_.clear();
\r
4022 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4023 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4024 if ( FAILED( hr ) ) {
\r
4025 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
4029 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4030 if ( FAILED( hr ) ) {
\r
4031 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4035 // Count render devices
\r
4036 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4037 if ( FAILED( hr ) ) {
\r
4038 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4042 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4043 if ( FAILED( hr ) ) {
\r
4044 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4048 // validate device index
\r
4049 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4050 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4051 errorType = RtAudioError::INVALID_USE;
\r
4055 // determine whether index falls within capture or render devices
\r
4056 if ( device >= renderDeviceCount ) {
\r
4057 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4058 if ( FAILED( hr ) ) {
\r
4059 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4062 isCaptureDevice = true;
\r
4065 hr = renderDevices->Item( device, &devicePtr );
\r
4066 if ( FAILED( hr ) ) {
\r
4067 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4070 isCaptureDevice = false;
\r
4073 // get default device name
\r
4074 if ( isCaptureDevice ) {
\r
4075 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4076 if ( FAILED( hr ) ) {
\r
4077 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4082 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4083 if ( FAILED( hr ) ) {
\r
4084 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4089 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4090 if ( FAILED( hr ) ) {
\r
4091 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4094 PropVariantInit( &defaultDeviceNameProp );
\r
4096 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4097 if ( FAILED( hr ) ) {
\r
4098 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4102 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4105 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4106 if ( FAILED( hr ) ) {
\r
4107 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4111 PropVariantInit( &deviceNameProp );
\r
4113 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4114 if ( FAILED( hr ) ) {
\r
4115 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4119 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4122 if ( isCaptureDevice ) {
\r
4123 info.isDefaultInput = info.name == defaultDeviceName;
\r
4124 info.isDefaultOutput = false;
\r
4127 info.isDefaultInput = false;
\r
4128 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4132 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4133 if ( FAILED( hr ) ) {
\r
4134 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4138 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4139 if ( FAILED( hr ) ) {
\r
4140 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4144 if ( isCaptureDevice ) {
\r
4145 info.inputChannels = deviceFormat->nChannels;
\r
4146 info.outputChannels = 0;
\r
4147 info.duplexChannels = 0;
\r
4150 info.inputChannels = 0;
\r
4151 info.outputChannels = deviceFormat->nChannels;
\r
4152 info.duplexChannels = 0;
\r
4156 info.sampleRates.clear();
\r
4158 // allow support for all sample rates as we have a built-in sample rate converter
\r
4159 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4160 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4162 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
\r
4165 info.nativeFormats = 0;
\r
4167 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4168 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4169 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4171 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4172 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4174 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4175 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4178 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4179 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4180 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4182 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4183 info.nativeFormats |= RTAUDIO_SINT8;
\r
4185 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4186 info.nativeFormats |= RTAUDIO_SINT16;
\r
4188 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4189 info.nativeFormats |= RTAUDIO_SINT24;
\r
4191 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4192 info.nativeFormats |= RTAUDIO_SINT32;
\r
4197 info.probed = true;
\r
4200 // release all references
\r
4201 PropVariantClear( &deviceNameProp );
\r
4202 PropVariantClear( &defaultDeviceNameProp );
\r
4204 SAFE_RELEASE( captureDevices );
\r
4205 SAFE_RELEASE( renderDevices );
\r
4206 SAFE_RELEASE( devicePtr );
\r
4207 SAFE_RELEASE( defaultDevicePtr );
\r
4208 SAFE_RELEASE( audioClient );
\r
4209 SAFE_RELEASE( devicePropStore );
\r
4210 SAFE_RELEASE( defaultDevicePropStore );
\r
4212 CoTaskMemFree( deviceFormat );
\r
4213 CoTaskMemFree( closestMatchFormat );
\r
4215 if ( !errorText_.empty() )
\r
4216 error( errorType );
\r
4220 //-----------------------------------------------------------------------------
\r
4222 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4224 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4225 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4233 //-----------------------------------------------------------------------------
\r
4235 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4237 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4238 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4246 //-----------------------------------------------------------------------------
\r
4248 void RtApiWasapi::closeStream( void )
\r
4250 if ( stream_.state == STREAM_CLOSED ) {
\r
4251 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4252 error( RtAudioError::WARNING );
\r
4256 if ( stream_.state != STREAM_STOPPED )
\r
4259 // clean up stream memory
\r
4260 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4261 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4263 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4264 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4266 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4267 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4269 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4270 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4272 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4273 stream_.apiHandle = NULL;
\r
4275 for ( int i = 0; i < 2; i++ ) {
\r
4276 if ( stream_.userBuffer[i] ) {
\r
4277 free( stream_.userBuffer[i] );
\r
4278 stream_.userBuffer[i] = 0;
\r
4282 if ( stream_.deviceBuffer ) {
\r
4283 free( stream_.deviceBuffer );
\r
4284 stream_.deviceBuffer = 0;
\r
4287 // update stream state
\r
4288 stream_.state = STREAM_CLOSED;
\r
4291 //-----------------------------------------------------------------------------
\r
4293 void RtApiWasapi::startStream( void )
\r
4297 if ( stream_.state == STREAM_RUNNING ) {
\r
4298 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4299 error( RtAudioError::WARNING );
\r
4303 // update stream state
\r
4304 stream_.state = STREAM_RUNNING;
\r
4306 // create WASAPI stream thread
\r
4307 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4309 if ( !stream_.callbackInfo.thread ) {
\r
4310 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4311 error( RtAudioError::THREAD_ERROR );
\r
4314 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4315 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4319 //-----------------------------------------------------------------------------
\r
4321 void RtApiWasapi::stopStream( void )
\r
4325 if ( stream_.state == STREAM_STOPPED ) {
\r
4326 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4327 error( RtAudioError::WARNING );
\r
4331 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4332 stream_.state = STREAM_STOPPING;
\r
4334 // wait until stream thread is stopped
\r
4335 while( stream_.state != STREAM_STOPPED ) {
\r
4339 // Wait for the last buffer to play before stopping.
\r
4340 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4342 // stop capture client if applicable
\r
4343 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4344 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4345 if ( FAILED( hr ) ) {
\r
4346 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4347 error( RtAudioError::DRIVER_ERROR );
\r
4352 // stop render client if applicable
\r
4353 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4354 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4355 if ( FAILED( hr ) ) {
\r
4356 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4357 error( RtAudioError::DRIVER_ERROR );
\r
4362 // close thread handle
\r
4363 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4364 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4365 error( RtAudioError::THREAD_ERROR );
\r
4369 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4372 //-----------------------------------------------------------------------------
\r
4374 void RtApiWasapi::abortStream( void )
\r
4378 if ( stream_.state == STREAM_STOPPED ) {
\r
4379 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4380 error( RtAudioError::WARNING );
\r
4384 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4385 stream_.state = STREAM_STOPPING;
\r
4387 // wait until stream thread is stopped
\r
4388 while ( stream_.state != STREAM_STOPPED ) {
\r
4392 // stop capture client if applicable
\r
4393 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4394 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4395 if ( FAILED( hr ) ) {
\r
4396 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4397 error( RtAudioError::DRIVER_ERROR );
\r
4402 // stop render client if applicable
\r
4403 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4404 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4405 if ( FAILED( hr ) ) {
\r
4406 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4407 error( RtAudioError::DRIVER_ERROR );
\r
4412 // close thread handle
\r
4413 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4414 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4415 error( RtAudioError::THREAD_ERROR );
\r
4419 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4422 //-----------------------------------------------------------------------------
\r
4424 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4425 unsigned int firstChannel, unsigned int sampleRate,
\r
4426 RtAudioFormat format, unsigned int* bufferSize,
\r
4427 RtAudio::StreamOptions* options )
\r
4429 bool methodResult = FAILURE;
\r
4430 unsigned int captureDeviceCount = 0;
\r
4431 unsigned int renderDeviceCount = 0;
\r
4433 IMMDeviceCollection* captureDevices = NULL;
\r
4434 IMMDeviceCollection* renderDevices = NULL;
\r
4435 IMMDevice* devicePtr = NULL;
\r
4436 WAVEFORMATEX* deviceFormat = NULL;
\r
4437 unsigned int bufferBytes;
\r
4438 stream_.state = STREAM_STOPPED;
\r
4440 // create API Handle if not already created
\r
4441 if ( !stream_.apiHandle )
\r
4442 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4444 // Count capture devices
\r
4445 errorText_.clear();
\r
4446 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4447 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4448 if ( FAILED( hr ) ) {
\r
4449 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4453 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4454 if ( FAILED( hr ) ) {
\r
4455 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4459 // Count render devices
\r
4460 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4461 if ( FAILED( hr ) ) {
\r
4462 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4466 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4467 if ( FAILED( hr ) ) {
\r
4468 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4472 // validate device index
\r
4473 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4474 errorType = RtAudioError::INVALID_USE;
\r
4475 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4479 // determine whether index falls within capture or render devices
\r
4480 if ( device >= renderDeviceCount ) {
\r
4481 if ( mode != INPUT ) {
\r
4482 errorType = RtAudioError::INVALID_USE;
\r
4483 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4487 // retrieve captureAudioClient from devicePtr
\r
4488 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4490 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4491 if ( FAILED( hr ) ) {
\r
4492 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4496 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4497 NULL, ( void** ) &captureAudioClient );
\r
4498 if ( FAILED( hr ) ) {
\r
4499 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4503 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4504 if ( FAILED( hr ) ) {
\r
4505 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4509 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4510 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4513 if ( mode != OUTPUT ) {
\r
4514 errorType = RtAudioError::INVALID_USE;
\r
4515 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4519 // retrieve renderAudioClient from devicePtr
\r
4520 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4522 hr = renderDevices->Item( device, &devicePtr );
\r
4523 if ( FAILED( hr ) ) {
\r
4524 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4528 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4529 NULL, ( void** ) &renderAudioClient );
\r
4530 if ( FAILED( hr ) ) {
\r
4531 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4535 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4536 if ( FAILED( hr ) ) {
\r
4537 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4541 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4542 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4545 // fill stream data
\r
4546 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4547 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4548 stream_.mode = DUPLEX;
\r
4551 stream_.mode = mode;
\r
4554 stream_.device[mode] = device;
\r
4555 stream_.doByteSwap[mode] = false;
\r
4556 stream_.sampleRate = sampleRate;
\r
4557 stream_.bufferSize = *bufferSize;
\r
4558 stream_.nBuffers = 1;
\r
4559 stream_.nUserChannels[mode] = channels;
\r
4560 stream_.channelOffset[mode] = firstChannel;
\r
4561 stream_.userFormat = format;
\r
4562 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4564 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4565 stream_.userInterleaved = false;
\r
4567 stream_.userInterleaved = true;
\r
4568 stream_.deviceInterleaved[mode] = true;
\r
4570 // Set flags for buffer conversion.
\r
4571 stream_.doConvertBuffer[mode] = false;
\r
4572 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4573 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4574 stream_.doConvertBuffer[mode] = true;
\r
4575 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4576 stream_.nUserChannels[mode] > 1 )
\r
4577 stream_.doConvertBuffer[mode] = true;
\r
4579 if ( stream_.doConvertBuffer[mode] )
\r
4580 setConvertInfo( mode, 0 );
\r
4582 // Allocate necessary internal buffers
\r
4583 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4585 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4586 if ( !stream_.userBuffer[mode] ) {
\r
4587 errorType = RtAudioError::MEMORY_ERROR;
\r
4588 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4592 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4593 stream_.callbackInfo.priority = 15;
\r
4595 stream_.callbackInfo.priority = 0;
\r
4597 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4598 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4600 methodResult = SUCCESS;
\r
4604 SAFE_RELEASE( captureDevices );
\r
4605 SAFE_RELEASE( renderDevices );
\r
4606 SAFE_RELEASE( devicePtr );
\r
4607 CoTaskMemFree( deviceFormat );
\r
4609 // if method failed, close the stream
\r
4610 if ( methodResult == FAILURE )
\r
4613 if ( !errorText_.empty() )
\r
4614 error( errorType );
\r
4615 return methodResult;
\r
4618 //=============================================================================
\r
// Thread entry point handed to CreateThread: unpacks the RtApiWasapi
// instance pointer and runs its stream-processing loop (wasapiThread()).
4620 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )

4623 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
// Thread entry point used to stop a stream from outside the audio thread:
// wasapiThread() spawns this when the user callback returns 1, so that
// stopStream() is not invoked from the thread it must join/tear down.
4628 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )

4631 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
// Thread entry point used to abort a stream from outside the audio thread:
// wasapiThread() spawns this when the user callback returns 2 (immediate
// abort), mirroring stopWasapiThread() but calling abortStream() instead.
4636 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )

4639 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4644 //-----------------------------------------------------------------------------
\r
4646 void RtApiWasapi::wasapiThread()
\r
4648 // as this is a new thread, we must CoInitialize it
\r
4649 CoInitialize( NULL );
\r
4653 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4654 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4655 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4656 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4657 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4658 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4660 WAVEFORMATEX* captureFormat = NULL;
\r
4661 WAVEFORMATEX* renderFormat = NULL;
\r
4662 float captureSrRatio = 0.0f;
\r
4663 float renderSrRatio = 0.0f;
\r
4664 WasapiBuffer captureBuffer;
\r
4665 WasapiBuffer renderBuffer;
\r
4667 // declare local stream variables
\r
4668 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4669 BYTE* streamBuffer = NULL;
\r
4670 unsigned long captureFlags = 0;
\r
4671 unsigned int bufferFrameCount = 0;
\r
4672 unsigned int numFramesPadding = 0;
\r
4673 unsigned int convBufferSize = 0;
\r
4674 bool callbackPushed = false;
\r
4675 bool callbackPulled = false;
\r
4676 bool callbackStopped = false;
\r
4677 int callbackResult = 0;
\r
4679 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4680 char* convBuffer = NULL;
\r
4681 unsigned int convBuffSize = 0;
\r
4682 unsigned int deviceBuffSize = 0;
\r
4684 errorText_.clear();
\r
4685 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4687 // Attempt to assign "Pro Audio" characteristic to thread
\r
4688 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4690 DWORD taskIndex = 0;
\r
4691 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4692 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4693 FreeLibrary( AvrtDll );
\r
4696 // start capture stream if applicable
\r
4697 if ( captureAudioClient ) {
\r
4698 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4699 if ( FAILED( hr ) ) {
\r
4700 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4704 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4706 // initialize capture stream according to desired buffer size
\r
4707 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
4708 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
4710 if ( !captureClient ) {
\r
4711 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4712 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4713 desiredBufferPeriod,
\r
4714 desiredBufferPeriod,
\r
4717 if ( FAILED( hr ) ) {
\r
4718 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4722 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4723 ( void** ) &captureClient );
\r
4724 if ( FAILED( hr ) ) {
\r
4725 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4729 // configure captureEvent to trigger on every available capture buffer
\r
4730 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4731 if ( !captureEvent ) {
\r
4732 errorType = RtAudioError::SYSTEM_ERROR;
\r
4733 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4737 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4738 if ( FAILED( hr ) ) {
\r
4739 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
4743 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4744 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4747 unsigned int inBufferSize = 0;
\r
4748 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4749 if ( FAILED( hr ) ) {
\r
4750 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4754 // scale outBufferSize according to stream->user sample rate ratio
\r
4755 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4756 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4758 // set captureBuffer size
\r
4759 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4761 // reset the capture stream
\r
4762 hr = captureAudioClient->Reset();
\r
4763 if ( FAILED( hr ) ) {
\r
4764 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4768 // start the capture stream
\r
4769 hr = captureAudioClient->Start();
\r
4770 if ( FAILED( hr ) ) {
\r
4771 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4776 // start render stream if applicable
\r
4777 if ( renderAudioClient ) {
\r
4778 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4779 if ( FAILED( hr ) ) {
\r
4780 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4784 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4786 // initialize render stream according to desired buffer size
\r
4787 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4788 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4790 if ( !renderClient ) {
\r
4791 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4792 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4793 desiredBufferPeriod,
\r
4794 desiredBufferPeriod,
\r
4797 if ( FAILED( hr ) ) {
\r
4798 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4802 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4803 ( void** ) &renderClient );
\r
4804 if ( FAILED( hr ) ) {
\r
4805 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4809 // configure renderEvent to trigger on every available render buffer
\r
4810 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4811 if ( !renderEvent ) {
\r
4812 errorType = RtAudioError::SYSTEM_ERROR;
\r
4813 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4817 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4818 if ( FAILED( hr ) ) {
\r
4819 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4823 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4824 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4827 unsigned int outBufferSize = 0;
\r
4828 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4829 if ( FAILED( hr ) ) {
\r
4830 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4834 // scale inBufferSize according to user->stream sample rate ratio
\r
4835 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4836 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4838 // set renderBuffer size
\r
4839 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4841 // reset the render stream
\r
4842 hr = renderAudioClient->Reset();
\r
4843 if ( FAILED( hr ) ) {
\r
4844 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4848 // start the render stream
\r
4849 hr = renderAudioClient->Start();
\r
4850 if ( FAILED( hr ) ) {
\r
4851 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
4856 if ( stream_.mode == INPUT ) {
\r
4857 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4858 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4860 else if ( stream_.mode == OUTPUT ) {
\r
4861 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4862 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4864 else if ( stream_.mode == DUPLEX ) {
\r
4865 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4866 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4867 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4868 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4871 convBuffer = ( char* ) malloc( convBuffSize );
\r
4872 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4873 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4874 errorType = RtAudioError::MEMORY_ERROR;
\r
4875 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4879 // stream process loop
\r
4880 while ( stream_.state != STREAM_STOPPING ) {
\r
4881 if ( !callbackPulled ) {
\r
4884 // 1. Pull callback buffer from inputBuffer
\r
4885 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4886 // Convert callback buffer to user format
\r
4888 if ( captureAudioClient ) {
\r
4889 // Pull callback buffer from inputBuffer
\r
4890 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4891 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4892 stream_.deviceFormat[INPUT] );
\r
4894 if ( callbackPulled ) {
\r
4895 // Convert callback buffer to user sample rate
\r
4896 convertBufferWasapi( stream_.deviceBuffer,
\r
4898 stream_.nDeviceChannels[INPUT],
\r
4899 captureFormat->nSamplesPerSec,
\r
4900 stream_.sampleRate,
\r
4901 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4903 stream_.deviceFormat[INPUT] );
\r
4905 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4906 // Convert callback buffer to user format
\r
4907 convertBuffer( stream_.userBuffer[INPUT],
\r
4908 stream_.deviceBuffer,
\r
4909 stream_.convertInfo[INPUT] );
\r
4912 // no further conversion, simple copy deviceBuffer to userBuffer
\r
4913 memcpy( stream_.userBuffer[INPUT],
\r
4914 stream_.deviceBuffer,
\r
4915 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4920 // if there is no capture stream, set callbackPulled flag
\r
4921 callbackPulled = true;
\r
4924 // Execute Callback
\r
4925 // ================
\r
4926 // 1. Execute user callback method
\r
4927 // 2. Handle return value from callback
\r
4929 // if callback has not requested the stream to stop
\r
4930 if ( callbackPulled && !callbackStopped ) {
\r
4931 // Execute user callback method
\r
4932 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4933 stream_.userBuffer[INPUT],
\r
4934 stream_.bufferSize,
\r
4936 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4937 stream_.callbackInfo.userData );
\r
4939 // Handle return value from callback
\r
4940 if ( callbackResult == 1 ) {
\r
4941 // instantiate a thread to stop this thread
\r
4942 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4943 if ( !threadHandle ) {
\r
4944 errorType = RtAudioError::THREAD_ERROR;
\r
4945 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4948 else if ( !CloseHandle( threadHandle ) ) {
\r
4949 errorType = RtAudioError::THREAD_ERROR;
\r
4950 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4954 callbackStopped = true;
\r
4956 else if ( callbackResult == 2 ) {
\r
4957 // instantiate a thread to stop this thread
\r
4958 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4959 if ( !threadHandle ) {
\r
4960 errorType = RtAudioError::THREAD_ERROR;
\r
4961 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4964 else if ( !CloseHandle( threadHandle ) ) {
\r
4965 errorType = RtAudioError::THREAD_ERROR;
\r
4966 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4970 callbackStopped = true;
\r
4975 // Callback Output
\r
4976 // ===============
\r
4977 // 1. Convert callback buffer to stream format
\r
4978 // 2. Convert callback buffer to stream sample rate and channel count
\r
4979 // 3. Push callback buffer into outputBuffer
\r
4981 if ( renderAudioClient && callbackPulled ) {
\r
4982 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4983 // Convert callback buffer to stream format
\r
4984 convertBuffer( stream_.deviceBuffer,
\r
4985 stream_.userBuffer[OUTPUT],
\r
4986 stream_.convertInfo[OUTPUT] );
\r
4990 // Convert callback buffer to stream sample rate
\r
4991 convertBufferWasapi( convBuffer,
\r
4992 stream_.deviceBuffer,
\r
4993 stream_.nDeviceChannels[OUTPUT],
\r
4994 stream_.sampleRate,
\r
4995 renderFormat->nSamplesPerSec,
\r
4996 stream_.bufferSize,
\r
4998 stream_.deviceFormat[OUTPUT] );
\r
5000 // Push callback buffer into outputBuffer
\r
5001 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
5002 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
5003 stream_.deviceFormat[OUTPUT] );
\r
5006 // if there is no render stream, set callbackPushed flag
\r
5007 callbackPushed = true;
\r
5012 // 1. Get capture buffer from stream
\r
5013 // 2. Push capture buffer into inputBuffer
\r
5014 // 3. If 2. was successful: Release capture buffer
\r
5016 if ( captureAudioClient ) {
\r
5017 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
5018 if ( !callbackPulled ) {
\r
5019 WaitForSingleObject( captureEvent, INFINITE );
\r
5022 // Get capture buffer from stream
\r
5023 hr = captureClient->GetBuffer( &streamBuffer,
\r
5024 &bufferFrameCount,
\r
5025 &captureFlags, NULL, NULL );
\r
5026 if ( FAILED( hr ) ) {
\r
5027 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
5031 if ( bufferFrameCount != 0 ) {
\r
5032 // Push capture buffer into inputBuffer
\r
5033 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
5034 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
5035 stream_.deviceFormat[INPUT] ) )
\r
5037 // Release capture buffer
\r
5038 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
5039 if ( FAILED( hr ) ) {
\r
5040 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5046 // Inform WASAPI that capture was unsuccessful
\r
5047 hr = captureClient->ReleaseBuffer( 0 );
\r
5048 if ( FAILED( hr ) ) {
\r
5049 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5056 // Inform WASAPI that capture was unsuccessful
\r
5057 hr = captureClient->ReleaseBuffer( 0 );
\r
5058 if ( FAILED( hr ) ) {
\r
5059 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5067 // 1. Get render buffer from stream
\r
5068 // 2. Pull next buffer from outputBuffer
\r
5069 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5070 // Release render buffer
\r
5072 if ( renderAudioClient ) {
\r
5073 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5074 if ( callbackPulled && !callbackPushed ) {
\r
5075 WaitForSingleObject( renderEvent, INFINITE );
\r
5078 // Get render buffer from stream
\r
5079 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5080 if ( FAILED( hr ) ) {
\r
5081 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5085 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5086 if ( FAILED( hr ) ) {
\r
5087 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
5091 bufferFrameCount -= numFramesPadding;
\r
5093 if ( bufferFrameCount != 0 ) {
\r
5094 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5095 if ( FAILED( hr ) ) {
\r
5096 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5100 // Pull next buffer from outputBuffer
\r
5101 // Fill render buffer with next buffer
\r
5102 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5103 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5104 stream_.deviceFormat[OUTPUT] ) )
\r
5106 // Release render buffer
\r
5107 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5108 if ( FAILED( hr ) ) {
\r
5109 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5115 // Inform WASAPI that render was unsuccessful
\r
5116 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5117 if ( FAILED( hr ) ) {
\r
5118 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5125 // Inform WASAPI that render was unsuccessful
\r
5126 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5127 if ( FAILED( hr ) ) {
\r
5128 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5134 // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
\r
5135 if ( callbackPushed ) {
\r
5136 callbackPulled = false;
\r
5139 // tick stream time
\r
5140 RtApi::tickStreamTime();
\r
5145 CoTaskMemFree( captureFormat );
\r
5146 CoTaskMemFree( renderFormat );
\r
5148 free ( convBuffer );
\r
5152 // update stream state
\r
5153 stream_.state = STREAM_STOPPED;
\r
5155 if ( errorText_.empty() )
\r
5158 error( errorType );
\r
5161 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5165 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5167 // Modified by Robin Davies, October 2005
\r
5168 // - Improvements to DirectX pointer chasing.
\r
5169 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5170 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5171 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5172 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5174 #include <dsound.h>
\r
5175 #include <assert.h>
\r
5176 #include <algorithm>
\r
5178 #if defined(__MINGW32__)
\r
5179 // missing from latest mingw winapi
\r
5180 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5181 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5182 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5183 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5186 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5188 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5189 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5192 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5194 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5195 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5196 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5197 return pointer >= earlierPointer && pointer < laterPointer;
\r
5200 // A structure to hold various information related to the DirectSound
\r
5201 // API implementation.
\r
5203 unsigned int drainCounter; // Tracks callback counts when draining
\r
5204 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5208 UINT bufferPointer[2];
\r
5209 DWORD dsBufferSize[2];
\r
5210 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5214 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5217 // Declarations for utility functions, callbacks, and structures
\r
5218 // specific to the DirectSound implementation.
\r
5219 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5220 LPCTSTR description,
\r
5222 LPVOID lpContext );
\r
5224 static const char* getErrorString( int code );
\r
5226 static unsigned __stdcall callbackHandler( void *ptr );
\r
5235 : found(false) { validId[0] = false; validId[1] = false; }
\r
5238 struct DsProbeData {
\r
5240 std::vector<struct DsDevice>* dsDevices;
\r
// Constructor: attempt per-thread COM initialization and remember whether
// it succeeded, so the destructor can balance it with CoUninitialize().
5243 RtApiDs :: RtApiDs()

5245 // Dsound will run both-threaded. If CoInitialize fails, then just
5246 // accept whatever the mainline chose for a threading model.
5247 coInitialized_ = false;
5248 HRESULT hr = CoInitialize( NULL );
5249 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: balance the constructor's CoInitialize and make sure any
// still-open stream is closed before the object is destroyed.
5252 RtApiDs :: ~RtApiDs()

5254 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5255 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5258 // The DirectSound default output is always the first device.
\r
5259 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5264 // The DirectSound default input is always the first input device,
\r
5265 // which is the first capture device enumerated.
\r
5266 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
// Re-enumerate DirectSound playback and capture devices into dsDevices,
// pruning entries that have disappeared since the previous query.
// Enumeration failures are reported as warnings rather than fatal errors;
// the number of currently known devices is returned.
5271 unsigned int RtApiDs :: getDeviceCount( void )

5273 // Set query flag for previously found devices to false, so that we
5274 // can check for any devices that have disappeared.
5275 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5276 dsDevices[i].found = false;

5278 // Query DirectSound devices.
5279 struct DsProbeData probeInfo;
5280 probeInfo.isInput = false;
5281 probeInfo.dsDevices = &dsDevices;
5282 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5283 if ( FAILED( result ) ) {
5284 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5285 errorText_ = errorStream_.str();
5286 error( RtAudioError::WARNING );

5289 // Query DirectSoundCapture devices.
5290 probeInfo.isInput = true;
5291 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5292 if ( FAILED( result ) ) {
5293 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5294 errorText_ = errorStream_.str();
5295 error( RtAudioError::WARNING );

5298 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
5299 for ( unsigned int i=0; i<dsDevices.size(); ) {
5300 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );

// The surviving entries are the currently attached devices.
5304 return static_cast<unsigned int>(dsDevices.size());
\r
5307 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5309 RtAudio::DeviceInfo info;
\r
5310 info.probed = false;
\r
5312 if ( dsDevices.size() == 0 ) {
\r
5313 // Force a query of all devices
\r
5315 if ( dsDevices.size() == 0 ) {
\r
5316 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5317 error( RtAudioError::INVALID_USE );
\r
5322 if ( device >= dsDevices.size() ) {
\r
5323 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5324 error( RtAudioError::INVALID_USE );
\r
5329 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5331 LPDIRECTSOUND output;
\r
5333 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5334 if ( FAILED( result ) ) {
\r
5335 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5336 errorText_ = errorStream_.str();
\r
5337 error( RtAudioError::WARNING );
\r
5341 outCaps.dwSize = sizeof( outCaps );
\r
5342 result = output->GetCaps( &outCaps );
\r
5343 if ( FAILED( result ) ) {
\r
5344 output->Release();
\r
5345 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5346 errorText_ = errorStream_.str();
\r
5347 error( RtAudioError::WARNING );
\r
5351 // Get output channel information.
\r
5352 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5354 // Get sample rate information.
\r
5355 info.sampleRates.clear();
\r
5356 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5357 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5358 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
\r
5359 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5361 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
\r
5362 info.preferredSampleRate = SAMPLE_RATES[k];
\r
5366 // Get format information.
\r
5367 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5368 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5370 output->Release();
\r
5372 if ( getDefaultOutputDevice() == device )
\r
5373 info.isDefaultOutput = true;
\r
5375 if ( dsDevices[ device ].validId[1] == false ) {
\r
5376 info.name = dsDevices[ device ].name;
\r
5377 info.probed = true;
\r
5383 LPDIRECTSOUNDCAPTURE input;
\r
5384 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5385 if ( FAILED( result ) ) {
\r
5386 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5387 errorText_ = errorStream_.str();
\r
5388 error( RtAudioError::WARNING );
\r
5393 inCaps.dwSize = sizeof( inCaps );
\r
5394 result = input->GetCaps( &inCaps );
\r
5395 if ( FAILED( result ) ) {
\r
5397 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5398 errorText_ = errorStream_.str();
\r
5399 error( RtAudioError::WARNING );
\r
5403 // Get input channel information.
\r
5404 info.inputChannels = inCaps.dwChannels;
\r
5406 // Get sample rate and format information.
\r
5407 std::vector<unsigned int> rates;
\r
5408 if ( inCaps.dwChannels >= 2 ) {
\r
5409 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5410 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5411 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5412 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5413 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5414 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5415 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5416 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5418 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5419 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5420 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5421 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5422 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5424 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5425 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5426 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5427 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5428 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5431 else if ( inCaps.dwChannels == 1 ) {
\r
5432 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5433 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5434 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5435 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5436 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5437 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5438 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5439 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5441 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5442 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5443 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5444 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5445 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5447 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5448 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5449 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5450 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5451 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5454 else info.inputChannels = 0; // technically, this would be an error
\r
5458 if ( info.inputChannels == 0 ) return info;
\r
5460 // Copy the supported rates to the info structure but avoid duplication.
\r
5462 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5464 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5465 if ( rates[i] == info.sampleRates[j] ) {
\r
5470 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5472 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5474 // If device opens for both playback and capture, we determine the channels.
\r
5475 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5476 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5478 if ( device == 0 ) info.isDefaultInput = true;
\r
5480 // Copy name and return.
\r
5481 info.name = dsDevices[ device ].name;
\r
5482 info.probed = true;
\r
// NOTE(review): this chunk is a lossy extraction — each line is prefixed with its
// original file line number, and blank/brace-only/return lines were stripped
// (original numbering jumps, e.g. 5493-5495). Restore against the canonical
// RtAudio.cpp before compiling; comments below document the visible logic only.
//
// Opens DirectSound `device` for one direction (OUTPUT or INPUT), configures the
// wave format and DS buffers, allocates internal conversion buffers, fills in the
// shared DsHandle, and spawns the callback thread on first open. Returns
// true/SUCCESS on success; the error paths (stripped `return FAILURE;` lines)
// release any DS objects acquired so far. Interface: device index, stream mode,
// channel count/offset, sample rate, sample format, in/out buffer size in
// frames, optional RtAudio::StreamOptions.
5486 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

5487 unsigned int firstChannel, unsigned int sampleRate,

5488 RtAudioFormat format, unsigned int *bufferSize,

5489 RtAudio::StreamOptions *options )

// DirectSound exposes at most stereo per device, counting the channel offset.
5491 if ( channels + firstChannel > 2 ) {

5492 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

5496 size_t nDevices = dsDevices.size();

5497 if ( nDevices == 0 ) {

5498 // This should not happen because a check is made before this function is called.

5499 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

5503 if ( device >= nDevices ) {

5504 // This should not happen because a check is made before this function is called.

5505 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

// dsDevices[n].validId[0] is the playback GUID slot, validId[1] the capture slot.
5509 if ( mode == OUTPUT ) {

5510 if ( dsDevices[ device ].validId[0] == false ) {

5511 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

5512 errorText_ = errorStream_.str();

5516 else { // mode == INPUT

5517 if ( dsDevices[ device ].validId[1] == false ) {

5518 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

5519 errorText_ = errorStream_.str();

5524 // According to a note in PortAudio, using GetDesktopWindow()

5525 // instead of GetForegroundWindow() is supposed to avoid problems

5526 // that occur when the application's window is not the foreground

5527 // window. Also, if the application window closes before the

5528 // DirectSound buffer, DirectSound can crash. In the past, I had

5529 // problems when using GetDesktopWindow() but it seems fine now

5530 // (January 2010). I'll leave it commented here.

5531 // HWND hWnd = GetForegroundWindow();

5532 HWND hWnd = GetDesktopWindow();

5534 // Check the numberOfBuffers parameter and limit the lowest value to

5535 // two. This is a judgement call and a value of two is probably too

5536 // low for capture, but it should work for playback.

// `nBuffers` is declared on a stripped line above; user request is clamped below.
5538 if ( options ) nBuffers = options->numberOfBuffers;

5539 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

5540 if ( nBuffers < 2 ) nBuffers = 3;

5542 // Check the lower range of the user-specified buffer size and set

5543 // (arbitrarily) to a lower bound of 32.

5544 if ( *bufferSize < 32 ) *bufferSize = 32;

5546 // Create the wave format structure. The data format setting will

5547 // be determined later.

5548 WAVEFORMATEX waveFormat;

5549 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

5550 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

// The device is opened with channels + offset so firstChannel > 0 works.
5551 waveFormat.nChannels = channels + firstChannel;

5552 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

5554 // Determine the device buffer size. By default, we'll use the value

5555 // defined above (32K), but we will grow it to make allowances for

5556 // very large software buffer sizes.

5557 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

5558 DWORD dsPointerLeadTime = 0;

// Opaque handles stored into DsHandle at the end: the DS object and its buffer.
5560 void *ohandle = 0, *bhandle = 0;

5562 if ( mode == OUTPUT ) {

5564 LPDIRECTSOUND output;

5565 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5566 if ( FAILED( result ) ) {

5567 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5568 errorText_ = errorStream_.str();

// DSCAPS (declared on a stripped line) must have dwSize set before GetCaps.
5573 outCaps.dwSize = sizeof( outCaps );

5574 result = output->GetCaps( &outCaps );

5575 if ( FAILED( result ) ) {

5576 output->Release();

5577 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

5578 errorText_ = errorStream_.str();

5582 // Check channel information.

5583 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

5584 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

5585 errorText_ = errorStream_.str();

5589 // Check format information. Use 16-bit format unless not

5590 // supported or user requests 8-bit.

5591 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

5592 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

5593 waveFormat.wBitsPerSample = 16;

5594 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

// else branch (stripped brace above): fall back to 8-bit device format.
5597 waveFormat.wBitsPerSample = 8;

5598 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5600 stream_.userFormat = format;

5602 // Update wave format structure and buffer information.

5603 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5604 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

// Lead time in bytes: how far ahead of the play cursor we intend to write.
5605 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5607 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5608 while ( dsPointerLeadTime * 2U > dsBufferSize )

5609 dsBufferSize *= 2;

5611 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

5612 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

5613 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

5614 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

5615 if ( FAILED( result ) ) {

5616 output->Release();

5617 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

5618 errorText_ = errorStream_.str();

5622 // Even though we will write to the secondary buffer, we need to

5623 // access the primary buffer to set the correct output format

5624 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

5625 // buffer description.

5626 DSBUFFERDESC bufferDescription;

5627 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5628 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5629 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

5631 // Obtain the primary buffer

5632 LPDIRECTSOUNDBUFFER buffer;

5633 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5634 if ( FAILED( result ) ) {

5635 output->Release();

5636 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

5637 errorText_ = errorStream_.str();

5641 // Set the primary DS buffer sound format.

5642 result = buffer->SetFormat( &waveFormat );

5643 if ( FAILED( result ) ) {

5644 output->Release();

5645 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

5646 errorText_ = errorStream_.str();

5650 // Setup the secondary DS buffer description.

// `buffer` is reused: it now receives the secondary (streaming) buffer.
5651 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5652 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5653 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5654 DSBCAPS_GLOBALFOCUS |

5655 DSBCAPS_GETCURRENTPOSITION2 |

5656 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

5657 bufferDescription.dwBufferBytes = dsBufferSize;

5658 bufferDescription.lpwfxFormat = &waveFormat;

5660 // Try to create the secondary DS buffer. If that doesn't work,

5661 // try to use software mixing. Otherwise, there's a problem.

5662 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5663 if ( FAILED( result ) ) {

5664 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5665 DSBCAPS_GLOBALFOCUS |

5666 DSBCAPS_GETCURRENTPOSITION2 |

5667 DSBCAPS_LOCSOFTWARE ); // Force software mixing

5668 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5669 if ( FAILED( result ) ) {

5670 output->Release();

5671 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

5672 errorText_ = errorStream_.str();

5677 // Get the buffer size ... might be different from what we specified.

// DSBCAPS declaration (original line 5678) was stripped by the extraction.
5679 dsbcaps.dwSize = sizeof( DSBCAPS );

5680 result = buffer->GetCaps( &dsbcaps );

5681 if ( FAILED( result ) ) {

5682 output->Release();

5683 buffer->Release();

5684 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5685 errorText_ = errorStream_.str();

5689 dsBufferSize = dsbcaps.dwBufferBytes;

5691 // Lock the DS buffer

// audioPtr/dataLen declarations (original lines ~5692-5693) were stripped.
5694 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5695 if ( FAILED( result ) ) {

5696 output->Release();

5697 buffer->Release();

5698 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

5699 errorText_ = errorStream_.str();

5703 // Zero the DS buffer

5704 ZeroMemory( audioPtr, dataLen );

5706 // Unlock the DS buffer

5707 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5708 if ( FAILED( result ) ) {

5709 output->Release();

5710 buffer->Release();

5711 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

5712 errorText_ = errorStream_.str();

5716 ohandle = (void *) output;

5717 bhandle = (void *) buffer;

// Capture path: mirrors the output path using the DirectSoundCapture API.
5720 if ( mode == INPUT ) {

5722 LPDIRECTSOUNDCAPTURE input;

5723 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5724 if ( FAILED( result ) ) {

5725 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5726 errorText_ = errorStream_.str();

5731 inCaps.dwSize = sizeof( inCaps );

5732 result = input->GetCaps( &inCaps );

5733 if ( FAILED( result ) ) {

5735 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

5736 errorText_ = errorStream_.str();

5740 // Check channel information.

5741 if ( inCaps.dwChannels < channels + firstChannel ) {

5742 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

5746 // Check format information. Use 16-bit format unless user

5747 // requests 8-bit.

5748 DWORD deviceFormats;

5749 if ( channels + firstChannel == 2 ) {

// Stereo 8-bit capture masks at the standard rates (11.025/22.05/44.1/96 kHz).
5750 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

5751 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5752 waveFormat.wBitsPerSample = 8;

5753 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5755 else { // assume 16-bit is supported

5756 waveFormat.wBitsPerSample = 16;

5757 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5760 else { // channel == 1

5761 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

5762 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5763 waveFormat.wBitsPerSample = 8;

5764 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5766 else { // assume 16-bit is supported

5767 waveFormat.wBitsPerSample = 16;

5768 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5771 stream_.userFormat = format;

5773 // Update wave format structure and buffer information.

5774 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5775 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5776 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5778 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5779 while ( dsPointerLeadTime * 2U > dsBufferSize )

5780 dsBufferSize *= 2;

5782 // Setup the secondary DS buffer description.

5783 DSCBUFFERDESC bufferDescription;

5784 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

5785 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

5786 bufferDescription.dwFlags = 0;

5787 bufferDescription.dwReserved = 0;

5788 bufferDescription.dwBufferBytes = dsBufferSize;

5789 bufferDescription.lpwfxFormat = &waveFormat;

5791 // Create the capture buffer.

5792 LPDIRECTSOUNDCAPTUREBUFFER buffer;

5793 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

5794 if ( FAILED( result ) ) {

5796 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

5797 errorText_ = errorStream_.str();

5801 // Get the buffer size ... might be different from what we specified.

5802 DSCBCAPS dscbcaps;

5803 dscbcaps.dwSize = sizeof( DSCBCAPS );

5804 result = buffer->GetCaps( &dscbcaps );

5805 if ( FAILED( result ) ) {

5807 buffer->Release();

5808 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5809 errorText_ = errorStream_.str();

5813 dsBufferSize = dscbcaps.dwBufferBytes;

5815 // NOTE: We could have a problem here if this is a duplex stream

5816 // and the play and capture hardware buffer sizes are different

5817 // (I'm actually not sure if that is a problem or not).

5818 // Currently, we are not verifying that.

5820 // Lock the capture buffer

5823 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5824 if ( FAILED( result ) ) {

5826 buffer->Release();

5827 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

5828 errorText_ = errorStream_.str();

5832 // Zero the buffer

5833 ZeroMemory( audioPtr, dataLen );

5835 // Unlock the buffer

5836 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5837 if ( FAILED( result ) ) {

5839 buffer->Release();

5840 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

5841 errorText_ = errorStream_.str();

5845 ohandle = (void *) input;

5846 bhandle = (void *) buffer;

5849 // Set various stream parameters

5850 DsHandle *handle = 0;

5851 stream_.nDeviceChannels[mode] = channels + firstChannel;

5852 stream_.nUserChannels[mode] = channels;

5853 stream_.bufferSize = *bufferSize;

5854 stream_.channelOffset[mode] = firstChannel;

// DirectSound buffers are always interleaved; the user may request otherwise.
5855 stream_.deviceInterleaved[mode] = true;

5856 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

5857 else stream_.userInterleaved = true;

5859 // Set flag for buffer conversion

// Conversion is needed whenever channel count, sample format, or interleaving
// differs between the user view and the device view of the stream.
5860 stream_.doConvertBuffer[mode] = false;

5861 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

5862 stream_.doConvertBuffer[mode] = true;

5863 if (stream_.userFormat != stream_.deviceFormat[mode])

5864 stream_.doConvertBuffer[mode] = true;

5865 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

5866 stream_.nUserChannels[mode] > 1 )

5867 stream_.doConvertBuffer[mode] = true;

5869 // Allocate necessary internal buffers

5870 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

5871 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

5872 if ( stream_.userBuffer[mode] == NULL ) {

5873 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

5877 if ( stream_.doConvertBuffer[mode] ) {

5879 bool makeBuffer = true;

5880 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// In duplex mode the single deviceBuffer is shared: only reallocate when the
// input side needs more bytes than the already-allocated output side.
5881 if ( mode == INPUT ) {

5882 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

5883 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

5884 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

5888 if ( makeBuffer ) {

5889 bufferBytes *= *bufferSize;

5890 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

5891 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

5892 if ( stream_.deviceBuffer == NULL ) {

5893 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

5899 // Allocate our DsHandle structures for the stream.

5900 if ( stream_.apiHandle == 0 ) {

5902 handle = new DsHandle;

5904 catch ( std::bad_alloc& ) {

// NOTE(review): message says "AsioHandle" but this allocates a DsHandle —
// looks like a copy/paste from the ASIO backend; confirm upstream.
5905 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

5909 // Create a manual-reset event.

// Used by stopStream()/callbackEvent() to signal drain completion.
5910 handle->condition = CreateEvent( NULL, // no security

5911 TRUE, // manual-reset

5912 FALSE, // non-signaled initially

5913 NULL ); // unnamed

5914 stream_.apiHandle = (void *) handle;

5917 handle = (DsHandle *) stream_.apiHandle;

5918 handle->id[mode] = ohandle;

5919 handle->buffer[mode] = bhandle;

5920 handle->dsBufferSize[mode] = dsBufferSize;

5921 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

5923 stream_.device[mode] = device;

5924 stream_.state = STREAM_STOPPED;

5925 if ( stream_.mode == OUTPUT && mode == INPUT )

5926 // We had already set up an output stream.

5927 stream_.mode = DUPLEX;

5929 stream_.mode = mode;

5930 stream_.nBuffers = nBuffers;

5931 stream_.sampleRate = sampleRate;

5933 // Setup the buffer conversion information structure.

5934 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

5936 // Setup the callback thread.

// Only spawned once; a duplex open reuses the thread created for the first mode.
5937 if ( stream_.callbackInfo.isRunning == false ) {

5938 unsigned threadId;

5939 stream_.callbackInfo.isRunning = true;

5940 stream_.callbackInfo.object = (void *) this;

5941 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

5942 &stream_.callbackInfo, 0, &threadId );

5943 if ( stream_.callbackInfo.thread == 0 ) {

5944 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

5948 // Boost DS thread priority

5949 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// Shared error-exit cleanup (the label line was stripped by the extraction):
// release any DS playback/capture objects, the event, and all heap buffers.
5955 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5956 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5957 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5958 if ( buffer ) buffer->Release();

5959 object->Release();

5961 if ( handle->buffer[1] ) {

5962 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5963 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5964 if ( buffer ) buffer->Release();

5965 object->Release();

5967 CloseHandle( handle->condition );

5969 stream_.apiHandle = 0;

5972 for ( int i=0; i<2; i++ ) {

5973 if ( stream_.userBuffer[i] ) {

5974 free( stream_.userBuffer[i] );

5975 stream_.userBuffer[i] = 0;

5979 if ( stream_.deviceBuffer ) {

5980 free( stream_.deviceBuffer );

5981 stream_.deviceBuffer = 0;

5984 stream_.state = STREAM_CLOSED;
\r
// Closes the open stream: stops and joins the callback thread, releases the
// DirectSound playback/capture objects and buffers, destroys the condition
// event, frees the user/device conversion buffers, and resets stream state.
// Warns (no-op otherwise) if no stream is open.
// NOTE(review): brace/blank lines were stripped by the extraction here too.
5988 void RtApiDs :: closeStream()

5990 if ( stream_.state == STREAM_CLOSED ) {

5991 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

5992 error( RtAudioError::WARNING );

5996 // Stop the callback thread.

// isRunning=false makes the callback loop exit; then join and close the handle.
5997 stream_.callbackInfo.isRunning = false;

5998 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

5999 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

6001 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Index 0 = playback side, index 1 = capture side of the DsHandle.
6003 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

6004 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

6005 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6008 buffer->Release();

6010 object->Release();

6012 if ( handle->buffer[1] ) {

6013 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

6014 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6017 buffer->Release();

6019 object->Release();

6021 CloseHandle( handle->condition );

// Stripped line(s) here presumably delete the DsHandle before clearing apiHandle
// — TODO confirm against canonical source.
6023 stream_.apiHandle = 0;

6026 for ( int i=0; i<2; i++ ) {

6027 if ( stream_.userBuffer[i] ) {

6028 free( stream_.userBuffer[i] );

6029 stream_.userBuffer[i] = 0;

6033 if ( stream_.deviceBuffer ) {

6034 free( stream_.deviceBuffer );

6035 stream_.deviceBuffer = 0;

6038 stream_.mode = UNINITIALIZED;

6039 stream_.state = STREAM_CLOSED;
\r
// Starts the open stream: boosts the Windows timer resolution, primes the
// duplex pre-roll, starts the DS playback buffer (looping Play) and/or capture
// buffer (looping Start), resets the drain bookkeeping, and marks the stream
// RUNNING. Warns and returns early if the stream is already running.
6042 void RtApiDs :: startStream()

6045 if ( stream_.state == STREAM_RUNNING ) {

6046 errorText_ = "RtApiDs::startStream(): the stream is already running!";

6047 error( RtAudioError::WARNING );

6051 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6053 // Increase scheduler frequency on lesser windows (a side-effect of

6054 // increasing timer accuracy). On greater windows (Win2K or later),

6055 // this is already in effect.

// Paired with timeEndPeriod(1) in stopStream().
6056 timeBeginPeriod( 1 );

6058 buffersRolling = false;

6059 duplexPrerollBytes = 0;

6061 if ( stream_.mode == DUPLEX ) {

6062 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6063 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6066 HRESULT result = 0;

6067 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6069 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// DSBPLAY_LOOPING: the secondary buffer wraps continuously; the callback
// thread writes ahead of the play cursor.
6070 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6071 if ( FAILED( result ) ) {

6072 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6073 errorText_ = errorStream_.str();

6078 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6080 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6081 result = buffer->Start( DSCBSTART_LOOPING );

6082 if ( FAILED( result ) ) {

6083 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6084 errorText_ = errorStream_.str();

6089 handle->drainCounter = 0;

6090 handle->internalDrain = false;

// Clear the manual-reset drain event so a later stopStream() can wait on it.
6091 ResetEvent( handle->condition );

6092 stream_.state = STREAM_RUNNING;

// Error paths above jump here (label line stripped) and report via error().
6095 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stops the running stream. For output, first lets the last user buffers drain
// (blocks on handle->condition signaled by callbackEvent), then stops the DS
// buffer, zeroes it so a restart does not replay stale audio, and resets the
// write pointer. The capture side is stopped and cleared symmetrically.
// Finally restores the scheduler period and releases the stream mutex.
// Warns and returns early if already stopped.
6098 void RtApiDs :: stopStream()

6101 if ( stream_.state == STREAM_STOPPED ) {

6102 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6103 error( RtAudioError::WARNING );

6107 HRESULT result = 0;

// audioPtr/dataLen declarations (original lines ~6108-6109) were stripped.
6110 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6111 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain is in progress: request one (value 2 is
// the "externally requested" drain used by abortStream too) and wait for the
// callback thread to signal completion.
6112 if ( handle->drainCounter == 0 ) {

6113 handle->drainCounter = 2;

6114 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

6117 stream_.state = STREAM_STOPPED;

6119 MUTEX_LOCK( &stream_.mutex );

6121 // Stop the buffer and clear memory

6122 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6123 result = buffer->Stop();

6124 if ( FAILED( result ) ) {

6125 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6126 errorText_ = errorStream_.str();

6130 // Lock the buffer and clear it so that if we start to play again,

6131 // we won't have old data playing.

6132 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6133 if ( FAILED( result ) ) {

6134 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6135 errorText_ = errorStream_.str();

6139 // Zero the DS buffer

6140 ZeroMemory( audioPtr, dataLen );

6142 // Unlock the DS buffer

6143 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6144 if ( FAILED( result ) ) {

6145 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6146 errorText_ = errorStream_.str();

6150 // If we start playing again, we must begin at beginning of buffer.

6151 handle->bufferPointer[0] = 0;

6154 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6155 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

// audioPtr/dataLen reset lines (~6156-6157) were stripped by the extraction.
6159 stream_.state = STREAM_STOPPED;

// In DUPLEX mode the mutex was already taken in the output branch above.
6161 if ( stream_.mode != DUPLEX )

6162 MUTEX_LOCK( &stream_.mutex );

6164 result = buffer->Stop();

6165 if ( FAILED( result ) ) {

6166 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6167 errorText_ = errorStream_.str();

6171 // Lock the buffer and clear it so that if we start to play again,

6172 // we won't have old data playing.

6173 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6174 if ( FAILED( result ) ) {

6175 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6176 errorText_ = errorStream_.str();

6180 // Zero the DS buffer

6181 ZeroMemory( audioPtr, dataLen );

6183 // Unlock the DS buffer

6184 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6185 if ( FAILED( result ) ) {

6186 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6187 errorText_ = errorStream_.str();

6191 // If we start recording again, we must begin at beginning of buffer.

6192 handle->bufferPointer[1] = 0;

// Common exit (label line stripped): restore timer period and drop the mutex.
6196 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6197 MUTEX_UNLOCK( &stream_.mutex );

6199 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Aborts the stream without draining pending output: setting drainCounter to 2
// before delegating to stopStream() (the call at original line ~6213 was
// stripped by the extraction) makes stopStream() skip its drain-and-wait step.
// Warns and returns early if already stopped.
6202 void RtApiDs :: abortStream()

6205 if ( stream_.state == STREAM_STOPPED ) {

6206 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

6207 error( RtAudioError::WARNING );

6211 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6212 handle->drainCounter = 2;
\r
6217 void RtApiDs :: callbackEvent()
\r
6219 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6220 Sleep( 50 ); // sleep 50 milliseconds
\r
6224 if ( stream_.state == STREAM_CLOSED ) {
\r
6225 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6226 error( RtAudioError::WARNING );
\r
6230 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6231 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6233 // Check if we were draining the stream and signal is finished.
\r
6234 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6236 stream_.state = STREAM_STOPPING;
\r
6237 if ( handle->internalDrain == false )
\r
6238 SetEvent( handle->condition );
\r
6244 // Invoke user callback to get fresh output data UNLESS we are
\r
6245 // draining stream.
\r
6246 if ( handle->drainCounter == 0 ) {
\r
6247 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6248 double streamTime = getStreamTime();
\r
6249 RtAudioStreamStatus status = 0;
\r
6250 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6251 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6252 handle->xrun[0] = false;
\r
6254 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6255 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6256 handle->xrun[1] = false;
\r
6258 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6259 stream_.bufferSize, streamTime, status, info->userData );
\r
6260 if ( cbReturnValue == 2 ) {
\r
6261 stream_.state = STREAM_STOPPING;
\r
6262 handle->drainCounter = 2;
\r
6266 else if ( cbReturnValue == 1 ) {
\r
6267 handle->drainCounter = 1;
\r
6268 handle->internalDrain = true;
\r
6273 DWORD currentWritePointer, safeWritePointer;
\r
6274 DWORD currentReadPointer, safeReadPointer;
\r
6275 UINT nextWritePointer;
\r
6277 LPVOID buffer1 = NULL;
\r
6278 LPVOID buffer2 = NULL;
\r
6279 DWORD bufferSize1 = 0;
\r
6280 DWORD bufferSize2 = 0;
\r
6285 MUTEX_LOCK( &stream_.mutex );
\r
6286 if ( stream_.state == STREAM_STOPPED ) {
\r
6287 MUTEX_UNLOCK( &stream_.mutex );
\r
6291 if ( buffersRolling == false ) {
\r
6292 if ( stream_.mode == DUPLEX ) {
\r
6293 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6295 // It takes a while for the devices to get rolling. As a result,
\r
6296 // there's no guarantee that the capture and write device pointers
\r
6297 // will move in lockstep. Wait here for both devices to start
\r
6298 // rolling, and then set our buffer pointers accordingly.
\r
6299 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6300 // bytes later than the write buffer.
\r
6302 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6303 // take place between the two GetCurrentPosition calls... but I'm
\r
6304 // really not sure how to solve the problem. Temporarily boost to
\r
6305 // Realtime priority, maybe; but I'm not sure what priority the
\r
6306 // DirectSound service threads run at. We *should* be roughly
\r
6307 // within a ms or so of correct.
\r
6309 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6310 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6312 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6314 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6315 if ( FAILED( result ) ) {
\r
6316 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6317 errorText_ = errorStream_.str();
\r
6318 MUTEX_UNLOCK( &stream_.mutex );
\r
6319 error( RtAudioError::SYSTEM_ERROR );
\r
6322 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6323 if ( FAILED( result ) ) {
\r
6324 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6325 errorText_ = errorStream_.str();
\r
6326 MUTEX_UNLOCK( &stream_.mutex );
\r
6327 error( RtAudioError::SYSTEM_ERROR );
\r
6331 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6332 if ( FAILED( result ) ) {
\r
6333 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6334 errorText_ = errorStream_.str();
\r
6335 MUTEX_UNLOCK( &stream_.mutex );
\r
6336 error( RtAudioError::SYSTEM_ERROR );
\r
6339 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6340 if ( FAILED( result ) ) {
\r
6341 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6342 errorText_ = errorStream_.str();
\r
6343 MUTEX_UNLOCK( &stream_.mutex );
\r
6344 error( RtAudioError::SYSTEM_ERROR );
\r
6347 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6351 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6353 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6354 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6355 handle->bufferPointer[1] = safeReadPointer;
\r
6357 else if ( stream_.mode == OUTPUT ) {
\r
6359 // Set the proper nextWritePosition after initial startup.
\r
6360 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6361 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6362 if ( FAILED( result ) ) {
\r
6363 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6364 errorText_ = errorStream_.str();
\r
6365 MUTEX_UNLOCK( &stream_.mutex );
\r
6366 error( RtAudioError::SYSTEM_ERROR );
\r
6369 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6370 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6373 buffersRolling = true;
\r
6376 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6378 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6380 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6381 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6382 bufferBytes *= formatBytes( stream_.userFormat );
\r
6383 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6386 // Setup parameters and do buffer conversion if necessary.
\r
6387 if ( stream_.doConvertBuffer[0] ) {
\r
6388 buffer = stream_.deviceBuffer;
\r
6389 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6390 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6391 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6394 buffer = stream_.userBuffer[0];
\r
6395 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6396 bufferBytes *= formatBytes( stream_.userFormat );
\r
6399 // No byte swapping necessary in DirectSound implementation.
\r
6401 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6402 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6404 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6405 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6407 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6408 nextWritePointer = handle->bufferPointer[0];
\r
6410 DWORD endWrite, leadPointer;
\r
6412 // Find out where the read and "safe write" pointers are.
\r
6413 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6414 if ( FAILED( result ) ) {
\r
6415 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6416 errorText_ = errorStream_.str();
\r
6417 error( RtAudioError::SYSTEM_ERROR );
\r
6421 // We will copy our output buffer into the region between
\r
6422 // safeWritePointer and leadPointer. If leadPointer is not
\r
6423 // beyond the next endWrite position, wait until it is.
\r
6424 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6425 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6426 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6427 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6428 endWrite = nextWritePointer + bufferBytes;
\r
6430 // Check whether the entire write region is behind the play pointer.
\r
6431 if ( leadPointer >= endWrite ) break;
\r
6433 // If we are here, then we must wait until the leadPointer advances
\r
6434 // beyond the end of our next write region. We use the
\r
6435 // Sleep() function to suspend operation until that happens.
\r
6436 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6437 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6438 if ( millis < 1.0 ) millis = 1.0;
\r
6439 Sleep( (DWORD) millis );
\r
6442 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6443 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6444 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6445 handle->xrun[0] = true;
\r
6446 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6447 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6448 handle->bufferPointer[0] = nextWritePointer;
\r
6449 endWrite = nextWritePointer + bufferBytes;
\r
6452 // Lock free space in the buffer
\r
6453 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6454 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6455 if ( FAILED( result ) ) {
\r
6456 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6457 errorText_ = errorStream_.str();
\r
6458 MUTEX_UNLOCK( &stream_.mutex );
\r
6459 error( RtAudioError::SYSTEM_ERROR );
\r
6463 // Copy our buffer into the DS buffer
\r
6464 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6465 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6467 // Update our buffer offset and unlock sound buffer
\r
6468 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6469 if ( FAILED( result ) ) {
\r
6470 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6471 errorText_ = errorStream_.str();
\r
6472 MUTEX_UNLOCK( &stream_.mutex );
\r
6473 error( RtAudioError::SYSTEM_ERROR );
\r
6476 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6477 handle->bufferPointer[0] = nextWritePointer;
\r
6480 // Don't bother draining input
\r
6481 if ( handle->drainCounter ) {
\r
6482 handle->drainCounter++;
\r
6486 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6488 // Setup parameters.
\r
6489 if ( stream_.doConvertBuffer[1] ) {
\r
6490 buffer = stream_.deviceBuffer;
\r
6491 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6492 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6495 buffer = stream_.userBuffer[1];
\r
6496 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6497 bufferBytes *= formatBytes( stream_.userFormat );
\r
6500 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6501 long nextReadPointer = handle->bufferPointer[1];
\r
6502 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6504 // Find out where the write and "safe read" pointers are.
\r
6505 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6506 if ( FAILED( result ) ) {
\r
6507 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6508 errorText_ = errorStream_.str();
\r
6509 MUTEX_UNLOCK( &stream_.mutex );
\r
6510 error( RtAudioError::SYSTEM_ERROR );
\r
6514 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6515 DWORD endRead = nextReadPointer + bufferBytes;
\r
6517 // Handling depends on whether we are INPUT or DUPLEX.
\r
6518 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6519 // then a wait here will drag the write pointers into the forbidden zone.
\r
6521 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6522 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6523 // practical way to sync up the read and write pointers reliably, given the
\r
6524 // the very complex relationship between phase and increment of the read and write
\r
6527 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6528 // provide a pre-roll period of 0.5 seconds in which we return
\r
6529 // zeros from the read buffer while the pointers sync up.
\r
6531 if ( stream_.mode == DUPLEX ) {
\r
6532 if ( safeReadPointer < endRead ) {
\r
6533 if ( duplexPrerollBytes <= 0 ) {
\r
6534 // Pre-roll time over. Be more agressive.
\r
6535 int adjustment = endRead-safeReadPointer;
\r
6537 handle->xrun[1] = true;
\r
6539 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6540 // and perform fine adjustments later.
\r
6541 // - small adjustments: back off by twice as much.
\r
6542 if ( adjustment >= 2*bufferBytes )
\r
6543 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6545 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6547 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6551 // In pre=roll time. Just do it.
\r
6552 nextReadPointer = safeReadPointer - bufferBytes;
\r
6553 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6555 endRead = nextReadPointer + bufferBytes;
\r
6558 else { // mode == INPUT
\r
6559 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6560 // See comments for playback.
\r
6561 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6562 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6563 if ( millis < 1.0 ) millis = 1.0;
\r
6564 Sleep( (DWORD) millis );
\r
6566 // Wake up and find out where we are now.
\r
6567 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6568 if ( FAILED( result ) ) {
\r
6569 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6570 errorText_ = errorStream_.str();
\r
6571 MUTEX_UNLOCK( &stream_.mutex );
\r
6572 error( RtAudioError::SYSTEM_ERROR );
\r
6576 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6580 // Lock free space in the buffer
\r
6581 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6582 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6583 if ( FAILED( result ) ) {
\r
6584 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6585 errorText_ = errorStream_.str();
\r
6586 MUTEX_UNLOCK( &stream_.mutex );
\r
6587 error( RtAudioError::SYSTEM_ERROR );
\r
6591 if ( duplexPrerollBytes <= 0 ) {
\r
6592 // Copy our buffer into the DS buffer
\r
6593 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6594 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6597 memset( buffer, 0, bufferSize1 );
\r
6598 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6599 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6602 // Update our buffer offset and unlock sound buffer
\r
6603 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6604 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6605 if ( FAILED( result ) ) {
\r
6606 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6607 errorText_ = errorStream_.str();
\r
6608 MUTEX_UNLOCK( &stream_.mutex );
\r
6609 error( RtAudioError::SYSTEM_ERROR );
\r
6612 handle->bufferPointer[1] = nextReadPointer;
\r
6614 // No byte swapping necessary in DirectSound implementation.
\r
6616 // If necessary, convert 8-bit data from unsigned to signed.
\r
6617 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6618 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6620 // Do buffer conversion if necessary.
\r
6621 if ( stream_.doConvertBuffer[1] )
\r
6622 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6626 MUTEX_UNLOCK( &stream_.mutex );
\r
6627 RtApi::tickStreamTime();
\r
6630 // Definitions for utility functions and callbacks
\r
6631 // specific to the DirectSound implementation.
\r
6633 static unsigned __stdcall callbackHandler( void *ptr )
\r
6635 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6636 RtApiDs *object = (RtApiDs *) info->object;
\r
6637 bool* isRunning = &info->isRunning;
\r
6639 while ( *isRunning == true ) {
\r
6640 object->callbackEvent();
\r
6643 _endthreadex( 0 );
\r
6647 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6648 LPCTSTR description,
\r
6649 LPCTSTR /*module*/,
\r
6650 LPVOID lpContext )
\r
6652 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6653 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6656 bool validDevice = false;
\r
6657 if ( probeInfo.isInput == true ) {
\r
6659 LPDIRECTSOUNDCAPTURE object;
\r
6661 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6662 if ( hr != DS_OK ) return TRUE;
\r
6664 caps.dwSize = sizeof(caps);
\r
6665 hr = object->GetCaps( &caps );
\r
6666 if ( hr == DS_OK ) {
\r
6667 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6668 validDevice = true;
\r
6670 object->Release();
\r
6674 LPDIRECTSOUND object;
\r
6675 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6676 if ( hr != DS_OK ) return TRUE;
\r
6678 caps.dwSize = sizeof(caps);
\r
6679 hr = object->GetCaps( &caps );
\r
6680 if ( hr == DS_OK ) {
\r
6681 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6682 validDevice = true;
\r
6684 object->Release();
\r
6687 // If good device, then save its name and guid.
\r
6688 std::string name = convertCharPointerToStdString( description );
\r
6689 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6690 if ( lpguid == NULL )
\r
6691 name = "Default Device";
\r
6692 if ( validDevice ) {
\r
6693 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6694 if ( dsDevices[i].name == name ) {
\r
6695 dsDevices[i].found = true;
\r
6696 if ( probeInfo.isInput ) {
\r
6697 dsDevices[i].id[1] = lpguid;
\r
6698 dsDevices[i].validId[1] = true;
\r
6701 dsDevices[i].id[0] = lpguid;
\r
6702 dsDevices[i].validId[0] = true;
\r
6709 device.name = name;
\r
6710 device.found = true;
\r
6711 if ( probeInfo.isInput ) {
\r
6712 device.id[1] = lpguid;
\r
6713 device.validId[1] = true;
\r
6716 device.id[0] = lpguid;
\r
6717 device.validId[0] = true;
\r
6719 dsDevices.push_back( device );
\r
6725 static const char* getErrorString( int code )
\r
6729 case DSERR_ALLOCATED:
\r
6730 return "Already allocated";
\r
6732 case DSERR_CONTROLUNAVAIL:
\r
6733 return "Control unavailable";
\r
6735 case DSERR_INVALIDPARAM:
\r
6736 return "Invalid parameter";
\r
6738 case DSERR_INVALIDCALL:
\r
6739 return "Invalid call";
\r
6741 case DSERR_GENERIC:
\r
6742 return "Generic error";
\r
6744 case DSERR_PRIOLEVELNEEDED:
\r
6745 return "Priority level needed";
\r
6747 case DSERR_OUTOFMEMORY:
\r
6748 return "Out of memory";
\r
6750 case DSERR_BADFORMAT:
\r
6751 return "The sample rate or the channel format is not supported";
\r
6753 case DSERR_UNSUPPORTED:
\r
6754 return "Not supported";
\r
6756 case DSERR_NODRIVER:
\r
6757 return "No driver";
\r
6759 case DSERR_ALREADYINITIALIZED:
\r
6760 return "Already initialized";
\r
6762 case DSERR_NOAGGREGATION:
\r
6763 return "No aggregation";
\r
6765 case DSERR_BUFFERLOST:
\r
6766 return "Buffer lost";
\r
6768 case DSERR_OTHERAPPHASPRIO:
\r
6769 return "Another application already has priority";
\r
6771 case DSERR_UNINITIALIZED:
\r
6772 return "Uninitialized";
\r
6775 return "DirectSound unknown error";
\r
6778 //******************** End of __WINDOWS_DS__ *********************//
\r
6782 #if defined(__LINUX_ALSA__)
\r
6784 #include <alsa/asoundlib.h>
\r
6785 #include <unistd.h>
\r
6787 // A structure to hold various information related to the ALSA API
\r
6788 // implementation.
\r
6789 struct AlsaHandle {
\r
6790 snd_pcm_t *handles[2];
\r
6791 bool synchronized;
\r
6793 pthread_cond_t runnable_cv;
\r
6797 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6800 static void *alsaCallbackHandler( void * ptr );
\r
6802 RtApiAlsa :: RtApiAlsa()
\r
6804 // Nothing to do here.
\r
6807 RtApiAlsa :: ~RtApiAlsa()
\r
6809 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6812 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6814 unsigned nDevices = 0;
\r
6815 int result, subdevice, card;
\r
6817 snd_ctl_t *handle;
\r
6819 // Count cards and devices
\r
6821 snd_card_next( &card );
\r
6822 while ( card >= 0 ) {
\r
6823 sprintf( name, "hw:%d", card );
\r
6824 result = snd_ctl_open( &handle, name, 0 );
\r
6825 if ( result < 0 ) {
\r
6826 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6827 errorText_ = errorStream_.str();
\r
6828 error( RtAudioError::WARNING );
\r
6833 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6834 if ( result < 0 ) {
\r
6835 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6836 errorText_ = errorStream_.str();
\r
6837 error( RtAudioError::WARNING );
\r
6840 if ( subdevice < 0 )
\r
6845 snd_ctl_close( handle );
\r
6846 snd_card_next( &card );
\r
6849 result = snd_ctl_open( &handle, "default", 0 );
\r
6850 if (result == 0) {
\r
6852 snd_ctl_close( handle );
\r
6858 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6860 RtAudio::DeviceInfo info;
\r
6861 info.probed = false;
\r
6863 unsigned nDevices = 0;
\r
6864 int result, subdevice, card;
\r
6866 snd_ctl_t *chandle;
\r
6868 // Count cards and devices
\r
6871 snd_card_next( &card );
\r
6872 while ( card >= 0 ) {
\r
6873 sprintf( name, "hw:%d", card );
\r
6874 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6875 if ( result < 0 ) {
\r
6876 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6877 errorText_ = errorStream_.str();
\r
6878 error( RtAudioError::WARNING );
\r
6883 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6884 if ( result < 0 ) {
\r
6885 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6886 errorText_ = errorStream_.str();
\r
6887 error( RtAudioError::WARNING );
\r
6890 if ( subdevice < 0 ) break;
\r
6891 if ( nDevices == device ) {
\r
6892 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6898 snd_ctl_close( chandle );
\r
6899 snd_card_next( &card );
\r
6902 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6903 if ( result == 0 ) {
\r
6904 if ( nDevices == device ) {
\r
6905 strcpy( name, "default" );
\r
6911 if ( nDevices == 0 ) {
\r
6912 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6913 error( RtAudioError::INVALID_USE );
\r
6917 if ( device >= nDevices ) {
\r
6918 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6919 error( RtAudioError::INVALID_USE );
\r
6925 // If a stream is already open, we cannot probe the stream devices.
\r
6926 // Thus, use the saved results.
\r
6927 if ( stream_.state != STREAM_CLOSED &&
\r
6928 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6929 snd_ctl_close( chandle );
\r
6930 if ( device >= devices_.size() ) {
\r
6931 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6932 error( RtAudioError::WARNING );
\r
6935 return devices_[ device ];
\r
6938 int openMode = SND_PCM_ASYNC;
\r
6939 snd_pcm_stream_t stream;
\r
6940 snd_pcm_info_t *pcminfo;
\r
6941 snd_pcm_info_alloca( &pcminfo );
\r
6942 snd_pcm_t *phandle;
\r
6943 snd_pcm_hw_params_t *params;
\r
6944 snd_pcm_hw_params_alloca( ¶ms );
\r
6946 // First try for playback unless default device (which has subdev -1)
\r
6947 stream = SND_PCM_STREAM_PLAYBACK;
\r
6948 snd_pcm_info_set_stream( pcminfo, stream );
\r
6949 if ( subdevice != -1 ) {
\r
6950 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6951 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6953 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6954 if ( result < 0 ) {
\r
6955 // Device probably doesn't support playback.
\r
6956 goto captureProbe;
\r
6960 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6961 if ( result < 0 ) {
\r
6962 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6963 errorText_ = errorStream_.str();
\r
6964 error( RtAudioError::WARNING );
\r
6965 goto captureProbe;
\r
6968 // The device is open ... fill the parameter structure.
\r
6969 result = snd_pcm_hw_params_any( phandle, params );
\r
6970 if ( result < 0 ) {
\r
6971 snd_pcm_close( phandle );
\r
6972 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6973 errorText_ = errorStream_.str();
\r
6974 error( RtAudioError::WARNING );
\r
6975 goto captureProbe;
\r
6978 // Get output channel information.
\r
6979 unsigned int value;
\r
6980 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6981 if ( result < 0 ) {
\r
6982 snd_pcm_close( phandle );
\r
6983 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6984 errorText_ = errorStream_.str();
\r
6985 error( RtAudioError::WARNING );
\r
6986 goto captureProbe;
\r
6988 info.outputChannels = value;
\r
6989 snd_pcm_close( phandle );
\r
6992 stream = SND_PCM_STREAM_CAPTURE;
\r
6993 snd_pcm_info_set_stream( pcminfo, stream );
\r
6995 // Now try for capture unless default device (with subdev = -1)
\r
6996 if ( subdevice != -1 ) {
\r
6997 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6998 snd_ctl_close( chandle );
\r
6999 if ( result < 0 ) {
\r
7000 // Device probably doesn't support capture.
\r
7001 if ( info.outputChannels == 0 ) return info;
\r
7002 goto probeParameters;
\r
7006 snd_ctl_close( chandle );
\r
7008 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7009 if ( result < 0 ) {
\r
7010 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7011 errorText_ = errorStream_.str();
\r
7012 error( RtAudioError::WARNING );
\r
7013 if ( info.outputChannels == 0 ) return info;
\r
7014 goto probeParameters;
\r
7017 // The device is open ... fill the parameter structure.
\r
7018 result = snd_pcm_hw_params_any( phandle, params );
\r
7019 if ( result < 0 ) {
\r
7020 snd_pcm_close( phandle );
\r
7021 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7022 errorText_ = errorStream_.str();
\r
7023 error( RtAudioError::WARNING );
\r
7024 if ( info.outputChannels == 0 ) return info;
\r
7025 goto probeParameters;
\r
7028 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
7029 if ( result < 0 ) {
\r
7030 snd_pcm_close( phandle );
\r
7031 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
7032 errorText_ = errorStream_.str();
\r
7033 error( RtAudioError::WARNING );
\r
7034 if ( info.outputChannels == 0 ) return info;
\r
7035 goto probeParameters;
\r
7037 info.inputChannels = value;
\r
7038 snd_pcm_close( phandle );
\r
7040 // If device opens for both playback and capture, we determine the channels.
\r
7041 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
7042 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
7044 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7045 if ( device == 0 && info.outputChannels > 0 )
\r
7046 info.isDefaultOutput = true;
\r
7047 if ( device == 0 && info.inputChannels > 0 )
\r
7048 info.isDefaultInput = true;
\r
7051 // At this point, we just need to figure out the supported data
\r
7052 // formats and sample rates. We'll proceed by opening the device in
\r
7053 // the direction with the maximum number of channels, or playback if
\r
7054 // they are equal. This might limit our sample rate options, but so
\r
7057 if ( info.outputChannels >= info.inputChannels )
\r
7058 stream = SND_PCM_STREAM_PLAYBACK;
\r
7060 stream = SND_PCM_STREAM_CAPTURE;
\r
7061 snd_pcm_info_set_stream( pcminfo, stream );
\r
7063 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7064 if ( result < 0 ) {
\r
7065 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7066 errorText_ = errorStream_.str();
\r
7067 error( RtAudioError::WARNING );
\r
7071 // The device is open ... fill the parameter structure.
\r
7072 result = snd_pcm_hw_params_any( phandle, params );
\r
7073 if ( result < 0 ) {
\r
7074 snd_pcm_close( phandle );
\r
7075 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7076 errorText_ = errorStream_.str();
\r
7077 error( RtAudioError::WARNING );
\r
7081 // Test our discrete set of sample rate values.
\r
7082 info.sampleRates.clear();
\r
7083 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7084 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
\r
7085 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7087 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
\r
7088 info.preferredSampleRate = SAMPLE_RATES[i];
\r
7091 if ( info.sampleRates.size() == 0 ) {
\r
7092 snd_pcm_close( phandle );
\r
7093 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7094 errorText_ = errorStream_.str();
\r
7095 error( RtAudioError::WARNING );
\r
7099 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7100 snd_pcm_format_t format;
\r
7101 info.nativeFormats = 0;
\r
7102 format = SND_PCM_FORMAT_S8;
\r
7103 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7104 info.nativeFormats |= RTAUDIO_SINT8;
\r
7105 format = SND_PCM_FORMAT_S16;
\r
7106 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7107 info.nativeFormats |= RTAUDIO_SINT16;
\r
7108 format = SND_PCM_FORMAT_S24;
\r
7109 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7110 info.nativeFormats |= RTAUDIO_SINT24;
\r
7111 format = SND_PCM_FORMAT_S32;
\r
7112 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7113 info.nativeFormats |= RTAUDIO_SINT32;
\r
7114 format = SND_PCM_FORMAT_FLOAT;
\r
7115 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7116 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7117 format = SND_PCM_FORMAT_FLOAT64;
\r
7118 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7119 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7121 // Check that we have at least one supported format
\r
7122 if ( info.nativeFormats == 0 ) {
\r
7123 snd_pcm_close( phandle );
\r
7124 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7125 errorText_ = errorStream_.str();
\r
7126 error( RtAudioError::WARNING );
\r
7130 // Get the device name
\r
7132 result = snd_card_get_name( card, &cardname );
\r
7133 if ( result >= 0 ) {
\r
7134 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7139 // That's all ... close the device and return
\r
7140 snd_pcm_close( phandle );
\r
7141 info.probed = true;
\r
7145 void RtApiAlsa :: saveDeviceInfo( void )
\r
7149 unsigned int nDevices = getDeviceCount();
\r
7150 devices_.resize( nDevices );
\r
7151 for ( unsigned int i=0; i<nDevices; i++ )
\r
7152 devices_[i] = getDeviceInfo( i );
\r
7155 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7156 unsigned int firstChannel, unsigned int sampleRate,
\r
7157 RtAudioFormat format, unsigned int *bufferSize,
\r
7158 RtAudio::StreamOptions *options )
\r
7161 #if defined(__RTAUDIO_DEBUG__)
\r
7162 snd_output_t *out;
\r
7163 snd_output_stdio_attach(&out, stderr, 0);
\r
7166 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7168 unsigned nDevices = 0;
\r
7169 int result, subdevice, card;
\r
7171 snd_ctl_t *chandle;
\r
7173 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7174 snprintf(name, sizeof(name), "%s", "default");
\r
7176 // Count cards and devices
\r
7178 snd_card_next( &card );
\r
7179 while ( card >= 0 ) {
\r
7180 sprintf( name, "hw:%d", card );
\r
7181 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7182 if ( result < 0 ) {
\r
7183 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7184 errorText_ = errorStream_.str();
\r
7189 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7190 if ( result < 0 ) break;
\r
7191 if ( subdevice < 0 ) break;
\r
7192 if ( nDevices == device ) {
\r
7193 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7194 snd_ctl_close( chandle );
\r
7199 snd_ctl_close( chandle );
\r
7200 snd_card_next( &card );
\r
7203 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7204 if ( result == 0 ) {
\r
7205 if ( nDevices == device ) {
\r
7206 strcpy( name, "default" );
\r
7212 if ( nDevices == 0 ) {
\r
7213 // This should not happen because a check is made before this function is called.
\r
7214 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7218 if ( device >= nDevices ) {
\r
7219 // This should not happen because a check is made before this function is called.
\r
7220 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7227 // The getDeviceInfo() function will not work for a device that is
\r
7228 // already open. Thus, we'll probe the system before opening a
\r
7229 // stream and save the results for use by getDeviceInfo().
\r
7230 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7231 this->saveDeviceInfo();
\r
7233 snd_pcm_stream_t stream;
\r
7234 if ( mode == OUTPUT )
\r
7235 stream = SND_PCM_STREAM_PLAYBACK;
\r
7237 stream = SND_PCM_STREAM_CAPTURE;
\r
7239 snd_pcm_t *phandle;
\r
7240 int openMode = SND_PCM_ASYNC;
\r
7241 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7242 if ( result < 0 ) {
\r
7243 if ( mode == OUTPUT )
\r
7244 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7246 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7247 errorText_ = errorStream_.str();
\r
7251 // Fill the parameter structure.
\r
7252 snd_pcm_hw_params_t *hw_params;
\r
7253 snd_pcm_hw_params_alloca( &hw_params );
\r
7254 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7255 if ( result < 0 ) {
\r
7256 snd_pcm_close( phandle );
\r
7257 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7258 errorText_ = errorStream_.str();
\r
7262 #if defined(__RTAUDIO_DEBUG__)
\r
7263 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7264 snd_pcm_hw_params_dump( hw_params, out );
\r
7267 // Set access ... check user preference.
\r
7268 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7269 stream_.userInterleaved = false;
\r
7270 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7271 if ( result < 0 ) {
\r
7272 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7273 stream_.deviceInterleaved[mode] = true;
\r
7276 stream_.deviceInterleaved[mode] = false;
\r
7279 stream_.userInterleaved = true;
\r
7280 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7281 if ( result < 0 ) {
\r
7282 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7283 stream_.deviceInterleaved[mode] = false;
\r
7286 stream_.deviceInterleaved[mode] = true;
\r
7289 if ( result < 0 ) {
\r
7290 snd_pcm_close( phandle );
\r
7291 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7292 errorText_ = errorStream_.str();
\r
7296 // Determine how to set the device format.
\r
7297 stream_.userFormat = format;
\r
7298 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7300 if ( format == RTAUDIO_SINT8 )
\r
7301 deviceFormat = SND_PCM_FORMAT_S8;
\r
7302 else if ( format == RTAUDIO_SINT16 )
\r
7303 deviceFormat = SND_PCM_FORMAT_S16;
\r
7304 else if ( format == RTAUDIO_SINT24 )
\r
7305 deviceFormat = SND_PCM_FORMAT_S24;
\r
7306 else if ( format == RTAUDIO_SINT32 )
\r
7307 deviceFormat = SND_PCM_FORMAT_S32;
\r
7308 else if ( format == RTAUDIO_FLOAT32 )
\r
7309 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7310 else if ( format == RTAUDIO_FLOAT64 )
\r
7311 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7313 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7314 stream_.deviceFormat[mode] = format;
\r
7318 // The user requested format is not natively supported by the device.
\r
7319 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7320 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7321 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7325 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7326 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7327 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7331 deviceFormat = SND_PCM_FORMAT_S32;
\r
7332 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7333 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7337 deviceFormat = SND_PCM_FORMAT_S24;
\r
7338 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7339 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7343 deviceFormat = SND_PCM_FORMAT_S16;
\r
7344 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7345 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7349 deviceFormat = SND_PCM_FORMAT_S8;
\r
7350 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7351 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7355 // If we get here, no supported format was found.
\r
7356 snd_pcm_close( phandle );
\r
7357 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7358 errorText_ = errorStream_.str();
\r
7362 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7363 if ( result < 0 ) {
\r
7364 snd_pcm_close( phandle );
\r
7365 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7366 errorText_ = errorStream_.str();
\r
7370 // Determine whether byte-swaping is necessary.
\r
7371 stream_.doByteSwap[mode] = false;
\r
7372 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7373 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7374 if ( result == 0 )
\r
7375 stream_.doByteSwap[mode] = true;
\r
7376 else if (result < 0) {
\r
7377 snd_pcm_close( phandle );
\r
7378 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7379 errorText_ = errorStream_.str();
\r
7384 // Set the sample rate.
\r
7385 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7386 if ( result < 0 ) {
\r
7387 snd_pcm_close( phandle );
\r
7388 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7389 errorText_ = errorStream_.str();
\r
7393 // Determine the number of channels for this device. We support a possible
\r
7394 // minimum device channel number > than the value requested by the user.
\r
7395 stream_.nUserChannels[mode] = channels;
\r
7396 unsigned int value;
\r
7397 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7398 unsigned int deviceChannels = value;
\r
7399 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7400 snd_pcm_close( phandle );
\r
7401 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7402 errorText_ = errorStream_.str();
\r
7406 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7407 if ( result < 0 ) {
\r
7408 snd_pcm_close( phandle );
\r
7409 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7410 errorText_ = errorStream_.str();
\r
7413 deviceChannels = value;
\r
7414 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7415 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7417 // Set the device channels.
\r
7418 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7419 if ( result < 0 ) {
\r
7420 snd_pcm_close( phandle );
\r
7421 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7422 errorText_ = errorStream_.str();
\r
7426 // Set the buffer (or period) size.
\r
7428 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7429 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7430 if ( result < 0 ) {
\r
7431 snd_pcm_close( phandle );
\r
7432 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7433 errorText_ = errorStream_.str();
\r
7436 *bufferSize = periodSize;
\r
7438 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7439 unsigned int periods = 0;
\r
7440 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7441 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7442 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7443 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7444 if ( result < 0 ) {
\r
7445 snd_pcm_close( phandle );
\r
7446 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7447 errorText_ = errorStream_.str();
\r
7451 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7452 // MUST be the same in both directions!
\r
7453 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7454 snd_pcm_close( phandle );
\r
7455 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7456 errorText_ = errorStream_.str();
\r
7460 stream_.bufferSize = *bufferSize;
\r
7462 // Install the hardware configuration
\r
7463 result = snd_pcm_hw_params( phandle, hw_params );
\r
7464 if ( result < 0 ) {
\r
7465 snd_pcm_close( phandle );
\r
7466 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7467 errorText_ = errorStream_.str();
\r
7471 #if defined(__RTAUDIO_DEBUG__)
\r
7472 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7473 snd_pcm_hw_params_dump( hw_params, out );
\r
7476 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7477 snd_pcm_sw_params_t *sw_params = NULL;
\r
7478 snd_pcm_sw_params_alloca( &sw_params );
\r
7479 snd_pcm_sw_params_current( phandle, sw_params );
\r
7480 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7481 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7482 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7484 // The following two settings were suggested by Theo Veenker
\r
7485 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7486 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7488 // here are two options for a fix
\r
7489 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7490 snd_pcm_uframes_t val;
\r
7491 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7492 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7494 result = snd_pcm_sw_params( phandle, sw_params );
\r
7495 if ( result < 0 ) {
\r
7496 snd_pcm_close( phandle );
\r
7497 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7498 errorText_ = errorStream_.str();
\r
7502 #if defined(__RTAUDIO_DEBUG__)
\r
7503 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7504 snd_pcm_sw_params_dump( sw_params, out );
\r
7507 // Set flags for buffer conversion
\r
7508 stream_.doConvertBuffer[mode] = false;
\r
7509 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7510 stream_.doConvertBuffer[mode] = true;
\r
7511 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7512 stream_.doConvertBuffer[mode] = true;
\r
7513 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7514 stream_.nUserChannels[mode] > 1 )
\r
7515 stream_.doConvertBuffer[mode] = true;
\r
7517 // Allocate the ApiHandle if necessary and then save.
\r
7518 AlsaHandle *apiInfo = 0;
\r
7519 if ( stream_.apiHandle == 0 ) {
\r
7521 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7523 catch ( std::bad_alloc& ) {
\r
7524 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7528 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7529 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7533 stream_.apiHandle = (void *) apiInfo;
\r
7534 apiInfo->handles[0] = 0;
\r
7535 apiInfo->handles[1] = 0;
\r
7538 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7540 apiInfo->handles[mode] = phandle;
\r
7543 // Allocate necessary internal buffers.
\r
7544 unsigned long bufferBytes;
\r
7545 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7546 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7547 if ( stream_.userBuffer[mode] == NULL ) {
\r
7548 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7552 if ( stream_.doConvertBuffer[mode] ) {
\r
7554 bool makeBuffer = true;
\r
7555 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7556 if ( mode == INPUT ) {
\r
7557 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7558 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7559 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7563 if ( makeBuffer ) {
\r
7564 bufferBytes *= *bufferSize;
\r
7565 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7566 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7567 if ( stream_.deviceBuffer == NULL ) {
\r
7568 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7574 stream_.sampleRate = sampleRate;
\r
7575 stream_.nBuffers = periods;
\r
7576 stream_.device[mode] = device;
\r
7577 stream_.state = STREAM_STOPPED;
\r
7579 // Setup the buffer conversion information structure.
\r
7580 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7582 // Setup thread if necessary.
\r
7583 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7584 // We had already set up an output stream.
\r
7585 stream_.mode = DUPLEX;
\r
7586 // Link the streams if possible.
\r
7587 apiInfo->synchronized = false;
\r
7588 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7589 apiInfo->synchronized = true;
\r
7591 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7592 error( RtAudioError::WARNING );
\r
7596 stream_.mode = mode;
\r
7598 // Setup callback thread.
\r
7599 stream_.callbackInfo.object = (void *) this;
\r
7601 // Set the thread attributes for joinable and realtime scheduling
\r
7602 // priority (optional). The higher priority will only take affect
\r
7603 // if the program is run as root or suid. Note, under Linux
\r
7604 // processes with CAP_SYS_NICE privilege, a user can change
\r
7605 // scheduling policy and priority (thus need not be root). See
\r
7606 // POSIX "capabilities".
\r
7607 pthread_attr_t attr;
\r
7608 pthread_attr_init( &attr );
\r
7609 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7611 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7612 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7613 // We previously attempted to increase the audio callback priority
\r
7614 // to SCHED_RR here via the attributes. However, while no errors
\r
7615 // were reported in doing so, it did not work. So, now this is
\r
7616 // done in the alsaCallbackHandler function.
\r
7617 stream_.callbackInfo.doRealtime = true;
\r
7618 int priority = options->priority;
\r
7619 int min = sched_get_priority_min( SCHED_RR );
\r
7620 int max = sched_get_priority_max( SCHED_RR );
\r
7621 if ( priority < min ) priority = min;
\r
7622 else if ( priority > max ) priority = max;
\r
7623 stream_.callbackInfo.priority = priority;
\r
7627 stream_.callbackInfo.isRunning = true;
\r
7628 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7629 pthread_attr_destroy( &attr );
\r
7631 stream_.callbackInfo.isRunning = false;
\r
7632 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7641 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7642 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7643 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7645 stream_.apiHandle = 0;
\r
7648 if ( phandle) snd_pcm_close( phandle );
\r
7650 for ( int i=0; i<2; i++ ) {
\r
7651 if ( stream_.userBuffer[i] ) {
\r
7652 free( stream_.userBuffer[i] );
\r
7653 stream_.userBuffer[i] = 0;
\r
7657 if ( stream_.deviceBuffer ) {
\r
7658 free( stream_.deviceBuffer );
\r
7659 stream_.deviceBuffer = 0;
\r
7662 stream_.state = STREAM_CLOSED;
\r
7666 void RtApiAlsa :: closeStream()
\r
7668 if ( stream_.state == STREAM_CLOSED ) {
\r
7669 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7670 error( RtAudioError::WARNING );
\r
7674 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7675 stream_.callbackInfo.isRunning = false;
\r
7676 MUTEX_LOCK( &stream_.mutex );
\r
7677 if ( stream_.state == STREAM_STOPPED ) {
\r
7678 apiInfo->runnable = true;
\r
7679 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7681 MUTEX_UNLOCK( &stream_.mutex );
\r
7682 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7684 if ( stream_.state == STREAM_RUNNING ) {
\r
7685 stream_.state = STREAM_STOPPED;
\r
7686 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7687 snd_pcm_drop( apiInfo->handles[0] );
\r
7688 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7689 snd_pcm_drop( apiInfo->handles[1] );
\r
7693 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7694 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7695 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7697 stream_.apiHandle = 0;
\r
7700 for ( int i=0; i<2; i++ ) {
\r
7701 if ( stream_.userBuffer[i] ) {
\r
7702 free( stream_.userBuffer[i] );
\r
7703 stream_.userBuffer[i] = 0;
\r
7707 if ( stream_.deviceBuffer ) {
\r
7708 free( stream_.deviceBuffer );
\r
7709 stream_.deviceBuffer = 0;
\r
7712 stream_.mode = UNINITIALIZED;
\r
7713 stream_.state = STREAM_CLOSED;
\r
7716 void RtApiAlsa :: startStream()
\r
7718 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7721 if ( stream_.state == STREAM_RUNNING ) {
\r
7722 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7723 error( RtAudioError::WARNING );
\r
7727 MUTEX_LOCK( &stream_.mutex );
\r
7730 snd_pcm_state_t state;
\r
7731 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7732 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7733 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7734 state = snd_pcm_state( handle[0] );
\r
7735 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7736 result = snd_pcm_prepare( handle[0] );
\r
7737 if ( result < 0 ) {
\r
7738 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7739 errorText_ = errorStream_.str();
\r
7745 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7746 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7747 state = snd_pcm_state( handle[1] );
\r
7748 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7749 result = snd_pcm_prepare( handle[1] );
\r
7750 if ( result < 0 ) {
\r
7751 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7752 errorText_ = errorStream_.str();
\r
7758 stream_.state = STREAM_RUNNING;
\r
7761 apiInfo->runnable = true;
\r
7762 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7763 MUTEX_UNLOCK( &stream_.mutex );
\r
7765 if ( result >= 0 ) return;
\r
7766 error( RtAudioError::SYSTEM_ERROR );
\r
7769 void RtApiAlsa :: stopStream()
\r
7772 if ( stream_.state == STREAM_STOPPED ) {
\r
7773 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
7774 error( RtAudioError::WARNING );
\r
7778 stream_.state = STREAM_STOPPED;
\r
7779 MUTEX_LOCK( &stream_.mutex );
\r
7782 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7783 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7784 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7785 if ( apiInfo->synchronized )
\r
7786 result = snd_pcm_drop( handle[0] );
\r
7788 result = snd_pcm_drain( handle[0] );
\r
7789 if ( result < 0 ) {
\r
7790 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
7791 errorText_ = errorStream_.str();
\r
7796 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7797 result = snd_pcm_drop( handle[1] );
\r
7798 if ( result < 0 ) {
\r
7799 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
7800 errorText_ = errorStream_.str();
\r
7806 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7807 MUTEX_UNLOCK( &stream_.mutex );
\r
7809 if ( result >= 0 ) return;
\r
7810 error( RtAudioError::SYSTEM_ERROR );
\r
7813 void RtApiAlsa :: abortStream()
\r
7816 if ( stream_.state == STREAM_STOPPED ) {
\r
7817 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7818 error( RtAudioError::WARNING );
\r
7822 stream_.state = STREAM_STOPPED;
\r
7823 MUTEX_LOCK( &stream_.mutex );
\r
7826 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7827 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7828 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7829 result = snd_pcm_drop( handle[0] );
\r
7830 if ( result < 0 ) {
\r
7831 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7832 errorText_ = errorStream_.str();
\r
7837 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7838 result = snd_pcm_drop( handle[1] );
\r
7839 if ( result < 0 ) {
\r
7840 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7841 errorText_ = errorStream_.str();
\r
7847 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7848 MUTEX_UNLOCK( &stream_.mutex );
\r
7850 if ( result >= 0 ) return;
\r
7851 error( RtAudioError::SYSTEM_ERROR );
\r
7854 void RtApiAlsa :: callbackEvent()
\r
7856 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7857 if ( stream_.state == STREAM_STOPPED ) {
\r
7858 MUTEX_LOCK( &stream_.mutex );
\r
7859 while ( !apiInfo->runnable )
\r
7860 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7862 if ( stream_.state != STREAM_RUNNING ) {
\r
7863 MUTEX_UNLOCK( &stream_.mutex );
\r
7866 MUTEX_UNLOCK( &stream_.mutex );
\r
7869 if ( stream_.state == STREAM_CLOSED ) {
\r
7870 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7871 error( RtAudioError::WARNING );
\r
7875 int doStopStream = 0;
\r
7876 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7877 double streamTime = getStreamTime();
\r
7878 RtAudioStreamStatus status = 0;
\r
7879 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7880 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7881 apiInfo->xrun[0] = false;
\r
7883 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7884 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7885 apiInfo->xrun[1] = false;
\r
7887 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7888 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7890 if ( doStopStream == 2 ) {
\r
7895 MUTEX_LOCK( &stream_.mutex );
\r
7897 // The state might change while waiting on a mutex.
\r
7898 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7903 snd_pcm_t **handle;
\r
7904 snd_pcm_sframes_t frames;
\r
7905 RtAudioFormat format;
\r
7906 handle = (snd_pcm_t **) apiInfo->handles;
\r
7908 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7910 // Setup parameters.
\r
7911 if ( stream_.doConvertBuffer[1] ) {
\r
7912 buffer = stream_.deviceBuffer;
\r
7913 channels = stream_.nDeviceChannels[1];
\r
7914 format = stream_.deviceFormat[1];
\r
7917 buffer = stream_.userBuffer[1];
\r
7918 channels = stream_.nUserChannels[1];
\r
7919 format = stream_.userFormat;
\r
7922 // Read samples from device in interleaved/non-interleaved format.
\r
7923 if ( stream_.deviceInterleaved[1] )
\r
7924 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7926 void *bufs[channels];
\r
7927 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7928 for ( int i=0; i<channels; i++ )
\r
7929 bufs[i] = (void *) (buffer + (i * offset));
\r
7930 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7933 if ( result < (int) stream_.bufferSize ) {
\r
7934 // Either an error or overrun occured.
\r
7935 if ( result == -EPIPE ) {
\r
7936 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7937 if ( state == SND_PCM_STATE_XRUN ) {
\r
7938 apiInfo->xrun[1] = true;
\r
7939 result = snd_pcm_prepare( handle[1] );
\r
7940 if ( result < 0 ) {
\r
7941 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7942 errorText_ = errorStream_.str();
\r
7946 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7947 errorText_ = errorStream_.str();
\r
7951 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7952 errorText_ = errorStream_.str();
\r
7954 error( RtAudioError::WARNING );
\r
7958 // Do byte swapping if necessary.
\r
7959 if ( stream_.doByteSwap[1] )
\r
7960 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7962 // Do buffer conversion if necessary.
\r
7963 if ( stream_.doConvertBuffer[1] )
\r
7964 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7966 // Check stream latency
\r
7967 result = snd_pcm_delay( handle[1], &frames );
\r
7968 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7973 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7975 // Setup parameters and do buffer conversion if necessary.
\r
7976 if ( stream_.doConvertBuffer[0] ) {
\r
7977 buffer = stream_.deviceBuffer;
\r
7978 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7979 channels = stream_.nDeviceChannels[0];
\r
7980 format = stream_.deviceFormat[0];
\r
7983 buffer = stream_.userBuffer[0];
\r
7984 channels = stream_.nUserChannels[0];
\r
7985 format = stream_.userFormat;
\r
7988 // Do byte swapping if necessary.
\r
7989 if ( stream_.doByteSwap[0] )
\r
7990 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7992 // Write samples to device in interleaved/non-interleaved format.
\r
7993 if ( stream_.deviceInterleaved[0] )
\r
7994 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7996 void *bufs[channels];
\r
7997 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7998 for ( int i=0; i<channels; i++ )
\r
7999 bufs[i] = (void *) (buffer + (i * offset));
\r
8000 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
8003 if ( result < (int) stream_.bufferSize ) {
\r
8004 // Either an error or underrun occured.
\r
8005 if ( result == -EPIPE ) {
\r
8006 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
8007 if ( state == SND_PCM_STATE_XRUN ) {
\r
8008 apiInfo->xrun[0] = true;
\r
8009 result = snd_pcm_prepare( handle[0] );
\r
8010 if ( result < 0 ) {
\r
8011 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
8012 errorText_ = errorStream_.str();
\r
8016 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
8017 errorText_ = errorStream_.str();
\r
8021 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
8022 errorText_ = errorStream_.str();
\r
8024 error( RtAudioError::WARNING );
\r
8028 // Check stream latency
\r
8029 result = snd_pcm_delay( handle[0], &frames );
\r
8030 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
8034 MUTEX_UNLOCK( &stream_.mutex );
\r
8036 RtApi::tickStreamTime();
\r
8037 if ( doStopStream == 1 ) this->stopStream();
\r
8040 static void *alsaCallbackHandler( void *ptr )
\r
8042 CallbackInfo *info = (CallbackInfo *) ptr;
\r
8043 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
8044 bool *isRunning = &info->isRunning;
\r
8046 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
8047 if ( &info->doRealtime ) {
\r
8048 pthread_t tID = pthread_self(); // ID of this thread
\r
8049 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8050 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8054 while ( *isRunning == true ) {
\r
8055 pthread_testcancel();
\r
8056 object->callbackEvent();
\r
8059 pthread_exit( NULL );
\r
8062 //******************** End of __LINUX_ALSA__ *********************//
\r
8065 #if defined(__LINUX_PULSE__)
\r
8067 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8068 // and Tristan Matthews.
\r
8070 #include <pulse/error.h>
\r
8071 #include <pulse/simple.h>
\r
8074 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
\r
8075 44100, 48000, 96000, 0};
\r
8077 struct rtaudio_pa_format_mapping_t {
\r
8078 RtAudioFormat rtaudio_format;
\r
8079 pa_sample_format_t pa_format;
\r
8082 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8083 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8084 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8085 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8086 {0, PA_SAMPLE_INVALID}};
\r
8088 struct PulseAudioHandle {
\r
8089 pa_simple *s_play;
\r
8092 pthread_cond_t runnable_cv;
\r
8094 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8097 RtApiPulse::~RtApiPulse()
\r
8099 if ( stream_.state != STREAM_CLOSED )
\r
8103 unsigned int RtApiPulse::getDeviceCount( void )
\r
8108 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8110 RtAudio::DeviceInfo info;
\r
8111 info.probed = true;
\r
8112 info.name = "PulseAudio";
\r
8113 info.outputChannels = 2;
\r
8114 info.inputChannels = 2;
\r
8115 info.duplexChannels = 2;
\r
8116 info.isDefaultOutput = true;
\r
8117 info.isDefaultInput = true;
\r
8119 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8120 info.sampleRates.push_back( *sr );
\r
8122 info.preferredSampleRate = 48000;
\r
8123 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8128 static void *pulseaudio_callback( void * user )
\r
8130 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8131 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8132 volatile bool *isRunning = &cbi->isRunning;
\r
8134 while ( *isRunning ) {
\r
8135 pthread_testcancel();
\r
8136 context->callbackEvent();
\r
8139 pthread_exit( NULL );
\r
8142 void RtApiPulse::closeStream( void )
\r
8144 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8146 stream_.callbackInfo.isRunning = false;
\r
8148 MUTEX_LOCK( &stream_.mutex );
\r
8149 if ( stream_.state == STREAM_STOPPED ) {
\r
8150 pah->runnable = true;
\r
8151 pthread_cond_signal( &pah->runnable_cv );
\r
8153 MUTEX_UNLOCK( &stream_.mutex );
\r
8155 pthread_join( pah->thread, 0 );
\r
8156 if ( pah->s_play ) {
\r
8157 pa_simple_flush( pah->s_play, NULL );
\r
8158 pa_simple_free( pah->s_play );
\r
8161 pa_simple_free( pah->s_rec );
\r
8163 pthread_cond_destroy( &pah->runnable_cv );
\r
8165 stream_.apiHandle = 0;
\r
8168 if ( stream_.userBuffer[0] ) {
\r
8169 free( stream_.userBuffer[0] );
\r
8170 stream_.userBuffer[0] = 0;
\r
8172 if ( stream_.userBuffer[1] ) {
\r
8173 free( stream_.userBuffer[1] );
\r
8174 stream_.userBuffer[1] = 0;
\r
8177 stream_.state = STREAM_CLOSED;
\r
8178 stream_.mode = UNINITIALIZED;
\r
// Per-buffer worker for the PulseAudio callback thread: waits while the
// stream is stopped, invokes the user callback, then writes output to /
// reads input from the PulseAudio simple-API connections, converting
// between user and device formats as needed.
// NOTE(review): listing is lossy (numbering jumps, e.g. 8191 -> 8194);
// returns/braces between some statements are not visible here.
8181 void RtApiPulse::callbackEvent( void )

8183 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

// Block here until startStream() sets runnable and signals the condvar.
8185 if ( stream_.state == STREAM_STOPPED ) {

8186 MUTEX_LOCK( &stream_.mutex );

8187 while ( !pah->runnable )

8188 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );

// Woken but not running (e.g. stream being closed): release the lock.
8190 if ( stream_.state != STREAM_RUNNING ) {

8191 MUTEX_UNLOCK( &stream_.mutex );

8194 MUTEX_UNLOCK( &stream_.mutex );

// Defensive check: callbackEvent should never run on a closed stream.
8197 if ( stream_.state == STREAM_CLOSED ) {

8198 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "

8199 "this shouldn't happen!";

8200 error( RtAudioError::WARNING );

// Invoke the user callback with the current stream time; its return value
// selects normal continue (0), drain-stop (1) or abort (2).
8204 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;

8205 double streamTime = getStreamTime();

8206 RtAudioStreamStatus status = 0;

8207 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],

8208 stream_.bufferSize, streamTime, status,

8209 stream_.callbackInfo.userData );

// Return value 2: abort immediately (body not visible in this listing).
8211 if ( doStopStream == 2 ) {

8216 MUTEX_LOCK( &stream_.mutex );

// Choose the buffers actually handed to PulseAudio: the device-format
// conversion buffer when conversion is enabled, else the user buffers.
8217 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];

8218 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];

// State may have changed while acquiring the mutex.
8220 if ( stream_.state != STREAM_RUNNING )

// ---- Playback side ----
8225 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

8226 if ( stream_.doConvertBuffer[OUTPUT] ) {

// Convert user-format samples into the device buffer before writing.
8227 convertBuffer( stream_.deviceBuffer,

8228 stream_.userBuffer[OUTPUT],

8229 stream_.convertInfo[OUTPUT] );

8230 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *

8231 formatBytes( stream_.deviceFormat[OUTPUT] );

// presumably the "else" branch (no conversion): size from user format — TODO confirm
8233 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *

8234 formatBytes( stream_.userFormat );

// Push one buffer of audio to the playback connection; warn on failure.
8236 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {

8237 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<

8238 pa_strerror( pa_error ) << ".";

8239 errorText_ = errorStream_.str();

8240 error( RtAudioError::WARNING );

// ---- Capture side ----
8244 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {

8245 if ( stream_.doConvertBuffer[INPUT] )

8246 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *

8247 formatBytes( stream_.deviceFormat[INPUT] );

// presumably the "else" branch: size from user format — TODO confirm
8249 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *

8250 formatBytes( stream_.userFormat );

// Pull one buffer of audio from the record connection; warn on failure.
8252 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {

8253 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<

8254 pa_strerror( pa_error ) << ".";

8255 errorText_ = errorStream_.str();

8256 error( RtAudioError::WARNING );

// Convert captured device-format samples into the user buffer.
8258 if ( stream_.doConvertBuffer[INPUT] ) {

8259 convertBuffer( stream_.userBuffer[INPUT],

8260 stream_.deviceBuffer,

8261 stream_.convertInfo[INPUT] );

8266 MUTEX_UNLOCK( &stream_.mutex );

// Advance the stream-time counter by one buffer period.
8267 RtApi::tickStreamTime();

// Return value 1: stop the stream after this buffer (call not visible here).
8269 if ( doStopStream == 1 )
\r
// Start a stopped PulseAudio stream: flag it RUNNING and wake the parked
// callback thread via the runnable condition variable.
// NOTE(review): listing is lossy; returns after the error() calls are not
// visible here.
8273 void RtApiPulse::startStream( void )

8275 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

// Invalid use: cannot start a stream that was never opened / was closed.
8277 if ( stream_.state == STREAM_CLOSED ) {

8278 errorText_ = "RtApiPulse::startStream(): the stream is not open!";

8279 error( RtAudioError::INVALID_USE );

// Benign misuse: starting twice only warns.
8282 if ( stream_.state == STREAM_RUNNING ) {

8283 errorText_ = "RtApiPulse::startStream(): the stream is already running!";

8284 error( RtAudioError::WARNING );

8288 MUTEX_LOCK( &stream_.mutex );

8290 stream_.state = STREAM_RUNNING;

// Release the callback thread blocked in callbackEvent().
8292 pah->runnable = true;

8293 pthread_cond_signal( &pah->runnable_cv );

8294 MUTEX_UNLOCK( &stream_.mutex );
\r
// Stop a running PulseAudio stream, draining (playing out) any audio still
// queued on the playback connection before returning.
// NOTE(review): listing is lossy; returns/braces after the error() calls
// are not visible here.
8297 void RtApiPulse::stopStream( void )

8299 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

8301 if ( stream_.state == STREAM_CLOSED ) {

8302 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";

8303 error( RtAudioError::INVALID_USE );

8306 if ( stream_.state == STREAM_STOPPED ) {

8307 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";

8308 error( RtAudioError::WARNING );

// Flag STOPPED before taking the mutex so the callback thread parks.
8312 stream_.state = STREAM_STOPPED;

8313 MUTEX_LOCK( &stream_.mutex );

8315 if ( pah && pah->s_play ) {

// Drain: block until queued playback data has been played.
8317 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {

8318 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<

8319 pa_strerror( pa_error ) << ".";

8320 errorText_ = errorStream_.str();

// Unlock before raising so error handling never runs with the mutex held.
8321 MUTEX_UNLOCK( &stream_.mutex );

8322 error( RtAudioError::SYSTEM_ERROR );

8327 stream_.state = STREAM_STOPPED;

8328 MUTEX_UNLOCK( &stream_.mutex );
\r
// Abort a running PulseAudio stream, discarding (flushing) any audio still
// queued on the playback connection — the discard-vs-drain counterpart of
// stopStream() above.
// NOTE(review): listing is lossy; returns/braces after the error() calls
// are not visible here.
8331 void RtApiPulse::abortStream( void )

8333 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );

8335 if ( stream_.state == STREAM_CLOSED ) {

8336 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";

8337 error( RtAudioError::INVALID_USE );

8340 if ( stream_.state == STREAM_STOPPED ) {

8341 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";

8342 error( RtAudioError::WARNING );

// Flag STOPPED before taking the mutex so the callback thread parks.
8346 stream_.state = STREAM_STOPPED;

8347 MUTEX_LOCK( &stream_.mutex );

8349 if ( pah && pah->s_play ) {

// Flush: throw away queued playback data instead of playing it out.
8351 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {

8352 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<

8353 pa_strerror( pa_error ) << ".";

8354 errorText_ = errorStream_.str();

// Unlock before raising so error handling never runs with the mutex held.
8355 MUTEX_UNLOCK( &stream_.mutex );

8356 error( RtAudioError::SYSTEM_ERROR );

8361 stream_.state = STREAM_STOPPED;

8362 MUTEX_UNLOCK( &stream_.mutex );
\r
// Open the (single) PulseAudio device for the given direction: validate the
// requested parameters, set up format/channel conversion, allocate buffers,
// connect via the PulseAudio simple API, and spawn the callback thread.
// Returns true on success; false on any validation or allocation failure.
// NOTE(review): listing is lossy (numbering jumps throughout) — "return",
// "break", "else", "goto error"-style cleanup edges and several braces are
// not visible here.
8365 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,

8366 unsigned int channels, unsigned int firstChannel,

8367 unsigned int sampleRate, RtAudioFormat format,

8368 unsigned int *bufferSize, RtAudio::StreamOptions *options )

8370 PulseAudioHandle *pah = 0;

8371 unsigned long bufferBytes = 0;

8372 pa_sample_spec ss;

// This backend exposes exactly one device (id 0), input or output only.
8374 if ( device != 0 ) return false;

8375 if ( mode != INPUT && mode != OUTPUT ) return false;

8376 if ( channels != 1 && channels != 2 ) {

8377 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";

8380 ss.channels = channels;

// Channel offsets are not supported by this backend.
8382 if ( firstChannel != 0 ) return false;

// Accept only the rates in the SUPPORTED_SAMPLERATES table (0-terminated).
8384 bool sr_found = false;

8385 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {

8386 if ( sampleRate == *sr ) {

8388 stream_.sampleRate = sampleRate;

8389 ss.rate = sampleRate;

8393 if ( !sr_found ) {

8394 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";

// Map the RtAudio sample format onto a native PulseAudio format.
8398 bool sf_found = 0;

8399 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;

8400 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {

8401 if ( format == sf->rtaudio_format ) {

8403 stream_.userFormat = sf->rtaudio_format;

8404 stream_.deviceFormat[mode] = stream_.userFormat;

8405 ss.format = sf->pa_format;

8409 if ( !sf_found ) { // Use internal data format conversion.

// No native match: run the device in float32 and convert internally.
8410 stream_.userFormat = format;

8411 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;

8412 ss.format = PA_SAMPLE_FLOAT32LE;

8415 // Set other stream parameters.

8416 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

8417 else stream_.userInterleaved = true;

8418 stream_.deviceInterleaved[mode] = true;

8419 stream_.nBuffers = 1;

8420 stream_.doByteSwap[mode] = false;

8421 stream_.nUserChannels[mode] = channels;

8422 stream_.nDeviceChannels[mode] = channels + firstChannel;

8423 stream_.channelOffset[mode] = 0;

8424 std::string streamName = "RtAudio";

8426 // Set flags for buffer conversion.

8427 stream_.doConvertBuffer[mode] = false;

8428 if ( stream_.userFormat != stream_.deviceFormat[mode] )

8429 stream_.doConvertBuffer[mode] = true;

8430 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

8431 stream_.doConvertBuffer[mode] = true;

8433 // Allocate necessary internal buffers.

8434 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

8435 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

8436 if ( stream_.userBuffer[mode] == NULL ) {

8437 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";

8440 stream_.bufferSize = *bufferSize;

8442 if ( stream_.doConvertBuffer[mode] ) {

8444 bool makeBuffer = true;

8445 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex, reuse an output-side device buffer if it is big enough.
8446 if ( mode == INPUT ) {

8447 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

8448 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

8449 if ( bufferBytes <= bytesOut ) makeBuffer = false;

8453 if ( makeBuffer ) {

8454 bufferBytes *= *bufferSize;

8455 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

8456 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

8457 if ( stream_.deviceBuffer == NULL ) {

8458 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";

8464 stream_.device[mode] = device;

8466 // Setup the buffer conversion information structure.

8467 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

// Create the per-stream handle (thread + condvar) on first open.
8469 if ( !stream_.apiHandle ) {

8470 PulseAudioHandle *pah = new PulseAudioHandle;

8472 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";

8476 stream_.apiHandle = pah;

8477 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {

8478 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";

8482 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );

8485 if ( options && !options->streamName.empty() ) streamName = options->streamName;

// presumably the INPUT branch of a mode switch in the missing lines — TODO confirm
8488 pa_buffer_attr buffer_attr;

8489 buffer_attr.fragsize = bufferBytes;

8490 buffer_attr.maxlength = -1;

// Connect to the default server/device for recording.
8492 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );

8493 if ( !pah->s_rec ) {

8494 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";

// Connect to the default server/device for playback.
// NOTE(review): uses the literal "RtAudio" rather than streamName, unlike
// the record connection above — looks inconsistent; confirm upstream.
8499 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );

8500 if ( !pah->s_play ) {

8501 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";

// First open sets the mode; opening the other direction upgrades to DUPLEX.
8509 if ( stream_.mode == UNINITIALIZED )

8510 stream_.mode = mode;

8511 else if ( stream_.mode == mode )

8514 stream_.mode = DUPLEX;

// Spawn the callback thread once per stream.
8516 if ( !stream_.callbackInfo.isRunning ) {

8517 stream_.callbackInfo.object = this;

8518 stream_.callbackInfo.isRunning = true;

8519 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {

8520 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";

8525 stream_.state = STREAM_STOPPED;

// ---- error cleanup path (label not visible in this listing) ----
8529 if ( pah && stream_.callbackInfo.isRunning ) {

8530 pthread_cond_destroy( &pah->runnable_cv );

8532 stream_.apiHandle = 0;

8535 for ( int i=0; i<2; i++ ) {

8536 if ( stream_.userBuffer[i] ) {

8537 free( stream_.userBuffer[i] );

8538 stream_.userBuffer[i] = 0;

8542 if ( stream_.deviceBuffer ) {

8543 free( stream_.deviceBuffer );

8544 stream_.deviceBuffer = 0;
\r
8550 //******************** End of __LINUX_PULSE__ *********************//
\r
8553 #if defined(__LINUX_OSS__)
\r
8555 #include <unistd.h>
\r
8556 #include <sys/ioctl.h>
\r
8557 #include <unistd.h>
\r
8558 #include <fcntl.h>
\r
8559 #include <sys/soundcard.h>
\r
8560 #include <errno.h>
\r
// Entry point for the OSS callback thread (defined later in the file).
8563 static void *ossCallbackHandler(void * ptr);

8565 // A structure to hold various information related to the OSS API

8566 // implementation.

// NOTE(review): listing is lossy — several members (e.g. xrun flags,
// triggered, the constructor header) are only partially visible here.
8567 struct OssHandle {

8568 int id[2]; // device ids

// Condition variable used to park the callback thread while stopped.
8571 pthread_cond_t runnable;

8574 :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// Default constructor — no OSS-specific setup is needed until a stream opens.
8577 RtApiOss :: RtApiOss()

8579 // Nothing to do here.
\r
// Destructor: ensure any open stream is closed so fds/threads are released.
8582 RtApiOss :: ~RtApiOss()

8584 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count OSS audio devices by querying the mixer's SNDCTL_SYSINFO ioctl.
// Returns sysinfo.numaudios on success; warns (and presumably returns 0,
// not visible in this lossy listing) on failure.
8587 unsigned int RtApiOss :: getDeviceCount( void )

8589 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

8590 if ( mixerfd == -1 ) {

8591 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";

8592 error( RtAudioError::WARNING );

// SNDCTL_SYSINFO is an OSS v4 ioctl — older OSS versions fail here.
8596 oss_sysinfo sysinfo;

8597 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {

8599 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";

8600 error( RtAudioError::WARNING );

// NOTE(review): close( mixerfd ) calls are in the missing lines — TODO confirm
8605 return sysinfo.numaudios;
\r
// Probe one OSS device: channel counts, native sample formats, and the
// supported sample rates, via the mixer's SNDCTL_SYSINFO / SNDCTL_AUDIOINFO
// ioctls. info.probed is set true only when the full probe succeeds.
// NOTE(review): listing is lossy — early "return info" statements and the
// close( mixerfd ) calls are in the missing lines.
8608 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )

8610 RtAudio::DeviceInfo info;

8611 info.probed = false;

8613 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

8614 if ( mixerfd == -1 ) {

8615 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";

8616 error( RtAudioError::WARNING );

8620 oss_sysinfo sysinfo;

8621 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );

8622 if ( result == -1 ) {

8624 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";

8625 error( RtAudioError::WARNING );

8629 unsigned nDevices = sysinfo.numaudios;

8630 if ( nDevices == 0 ) {

8632 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";

8633 error( RtAudioError::INVALID_USE );

// Validate the requested device index against the probed count.
8637 if ( device >= nDevices ) {

8639 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";

8640 error( RtAudioError::INVALID_USE );

// Fetch per-device capabilities.
8644 oss_audioinfo ainfo;

8645 ainfo.dev = device;

8646 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );

8648 if ( result == -1 ) {

8649 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";

8650 errorText_ = errorStream_.str();

8651 error( RtAudioError::WARNING );

// Translate capability bits into channel counts.
8656 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;

8657 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;

8658 if ( ainfo.caps & PCM_CAP_DUPLEX ) {

8659 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )

8660 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

8663 // Probe data formats ... do for input

// Map OSS AFMT_* bits onto RtAudio format flags.
8664 unsigned long mask = ainfo.iformats;

8665 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )

8666 info.nativeFormats |= RTAUDIO_SINT16;

8667 if ( mask & AFMT_S8 )

8668 info.nativeFormats |= RTAUDIO_SINT8;

8669 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )

8670 info.nativeFormats |= RTAUDIO_SINT32;

8671 if ( mask & AFMT_FLOAT )

8672 info.nativeFormats |= RTAUDIO_FLOAT32;

8673 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )

8674 info.nativeFormats |= RTAUDIO_SINT24;

8676 // Check that we have at least one supported format

8677 if ( info.nativeFormats == 0 ) {

8678 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";

8679 errorText_ = errorStream_.str();

8680 error( RtAudioError::WARNING );

8684 // Probe the supported sample rates.

8685 info.sampleRates.clear();

// If the device enumerates discrete rates, intersect with SAMPLE_RATES.
8686 if ( ainfo.nrates ) {

8687 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {

8688 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

8689 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {

8690 info.sampleRates.push_back( SAMPLE_RATES[k] );

// Prefer the highest supported rate at or below 48 kHz.
8692 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )

8693 info.preferredSampleRate = SAMPLE_RATES[k];

// presumably the "else" branch: device reports a min/max rate range — TODO confirm
8701 // Check min and max rate values;

8702 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

8703 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {

8704 info.sampleRates.push_back( SAMPLE_RATES[k] );

8706 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )

8707 info.preferredSampleRate = SAMPLE_RATES[k];

8712 if ( info.sampleRates.size() == 0 ) {

8713 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";

8714 errorText_ = errorStream_.str();

8715 error( RtAudioError::WARNING );

// Probe succeeded: record the device name and mark the info valid.
8718 info.probed = true;

8719 info.name = ainfo.name;
\r
// Open an OSS device for the requested direction: open the dsp node, set
// channels/format/fragments/rate via ioctls, configure conversion flags,
// allocate buffers, and start the callback thread. Returns true on success.
// NOTE(review): this listing is lossy — "return FAILURE/SUCCESS" statements,
// the error-cleanup label, several "else" branches and closing braces are
// in the missing lines; the visible tail (from 9138) is the cleanup path.
8726 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

8727 unsigned int firstChannel, unsigned int sampleRate,

8728 RtAudioFormat format, unsigned int *bufferSize,

8729 RtAudio::StreamOptions *options )

8731 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );

8732 if ( mixerfd == -1 ) {

8733 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";

8737 oss_sysinfo sysinfo;

8738 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );

8739 if ( result == -1 ) {

8741 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";

8745 unsigned nDevices = sysinfo.numaudios;

8746 if ( nDevices == 0 ) {

8747 // This should not happen because a check is made before this function is called.

8749 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";

8753 if ( device >= nDevices ) {

8754 // This should not happen because a check is made before this function is called.

8756 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";

// Look up the device node and capability bits.
8760 oss_audioinfo ainfo;

8761 ainfo.dev = device;

8762 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );

8764 if ( result == -1 ) {

8765 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";

8766 errorText_ = errorStream_.str();

8770 // Check if device supports input or output

8771 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||

8772 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {

8773 if ( mode == OUTPUT )

8774 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";

8776 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";

8777 errorText_ = errorStream_.str();

// Choose open() flags for the direction being opened.
8782 OssHandle *handle = (OssHandle *) stream_.apiHandle;

8783 if ( mode == OUTPUT )

8784 flags |= O_WRONLY;

8785 else { // mode == INPUT

8786 if (stream_.mode == OUTPUT && stream_.device[0] == device) {

8787 // We just set the same device for playback ... close and reopen for duplex (OSS only).

8788 close( handle->id[0] );

8789 handle->id[0] = 0;

8790 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {

8791 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";

8792 errorText_ = errorStream_.str();

8795 // Check that the number previously set channels is the same.

// OSS duplex on one node requires matching input/output channel counts.
8796 if ( stream_.nUserChannels[0] != channels ) {

8797 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";

8798 errorText_ = errorStream_.str();

8804 flags |= O_RDONLY;

8807 // Set exclusive access if specified.

8808 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;

8810 // Try to open the device.

8812 fd = open( ainfo.devnode, flags, 0 );

8814 if ( errno == EBUSY )

8815 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";

8817 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";

8818 errorText_ = errorStream_.str();

8822 // For duplex operation, specifically set this mode (this doesn't seem to work).

// NOTE(review): "flags | O_RDWR" is a bitwise-OR, not a comparison — it is
// always non-zero, so this condition is always true; looks like a bug in
// the original source (likely meant "flags & O_RDWR") — confirm upstream.
8824 if ( flags | O_RDWR ) {

8825 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );

8826 if ( result == -1) {

8827 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";

8828 errorText_ = errorStream_.str();

8834 // Check the device channel support.

8835 stream_.nUserChannels[mode] = channels;

8836 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {

8838 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";

8839 errorText_ = errorStream_.str();

8843 // Set the number of channels.

8844 int deviceChannels = channels + firstChannel;

8845 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );

// The ioctl may round the count down; reject anything below the request.
8846 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {

8848 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";

8849 errorText_ = errorStream_.str();

8852 stream_.nDeviceChannels[mode] = deviceChannels;

8854 // Get the data format mask

8856 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );

8857 if ( result == -1 ) {

8859 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";

8860 errorText_ = errorStream_.str();

8864 // Determine how to set the device format.

// First preference: the device natively supports the requested format
// (native-endian AFMT_*_NE) or its byte-swapped twin (AFMT_*_OE).
8865 stream_.userFormat = format;

8866 int deviceFormat = -1;

8867 stream_.doByteSwap[mode] = false;

8868 if ( format == RTAUDIO_SINT8 ) {

8869 if ( mask & AFMT_S8 ) {

8870 deviceFormat = AFMT_S8;

8871 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

8874 else if ( format == RTAUDIO_SINT16 ) {

8875 if ( mask & AFMT_S16_NE ) {

8876 deviceFormat = AFMT_S16_NE;

8877 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8879 else if ( mask & AFMT_S16_OE ) {

8880 deviceFormat = AFMT_S16_OE;

8881 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

// Opposite-endian device format: samples must be byte-swapped in software.
8882 stream_.doByteSwap[mode] = true;

8885 else if ( format == RTAUDIO_SINT24 ) {

8886 if ( mask & AFMT_S24_NE ) {

8887 deviceFormat = AFMT_S24_NE;

8888 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8890 else if ( mask & AFMT_S24_OE ) {

8891 deviceFormat = AFMT_S24_OE;

8892 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8893 stream_.doByteSwap[mode] = true;

8896 else if ( format == RTAUDIO_SINT32 ) {

8897 if ( mask & AFMT_S32_NE ) {

8898 deviceFormat = AFMT_S32_NE;

8899 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8901 else if ( mask & AFMT_S32_OE ) {

8902 deviceFormat = AFMT_S32_OE;

8903 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8904 stream_.doByteSwap[mode] = true;

8908 if ( deviceFormat == -1 ) {

8909 // The user requested format is not natively supported by the device.

// Fallback ladder: pick the best available device format (preferring
// native-endian, wider formats first) and convert in software.
8910 if ( mask & AFMT_S16_NE ) {

8911 deviceFormat = AFMT_S16_NE;

8912 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8914 else if ( mask & AFMT_S32_NE ) {

8915 deviceFormat = AFMT_S32_NE;

8916 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8918 else if ( mask & AFMT_S24_NE ) {

8919 deviceFormat = AFMT_S24_NE;

8920 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8922 else if ( mask & AFMT_S16_OE ) {

8923 deviceFormat = AFMT_S16_OE;

8924 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

8925 stream_.doByteSwap[mode] = true;

8927 else if ( mask & AFMT_S32_OE ) {

8928 deviceFormat = AFMT_S32_OE;

8929 stream_.deviceFormat[mode] = RTAUDIO_SINT32;

8930 stream_.doByteSwap[mode] = true;

8932 else if ( mask & AFMT_S24_OE ) {

8933 deviceFormat = AFMT_S24_OE;

8934 stream_.deviceFormat[mode] = RTAUDIO_SINT24;

8935 stream_.doByteSwap[mode] = true;

8937 else if ( mask & AFMT_S8) {

8938 deviceFormat = AFMT_S8;

8939 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

8943 if ( stream_.deviceFormat[mode] == 0 ) {

8944 // This really shouldn't happen ...

8946 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";

8947 errorText_ = errorStream_.str();

8951 // Set the data format.

// The ioctl may substitute a different format; treat any change as failure.
8952 int temp = deviceFormat;

8953 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );

8954 if ( result == -1 || deviceFormat != temp ) {

8956 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";

8957 errorText_ = errorStream_.str();

8961 // Attempt to set the buffer size.  According to OSS, the minimum

8962 // number of buffers is two.  The supposed minimum buffer size is 16

8963 // bytes, so that will be our lower bound.  The argument to this

8964 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in

8965 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.

8966 // We'll check the actual value used near the end of the setup

8968 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;

8969 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;

8971 if ( options ) buffers = options->numberOfBuffers;

8972 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;

8973 if ( buffers < 2 ) buffers = 3;

// Encode fragment count/size as 0xMMMMSSSS (log2 of the byte size).
8974 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );

8975 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );

8976 if ( result == -1 ) {

8978 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";

8979 errorText_ = errorStream_.str();

8982 stream_.nBuffers = buffers;

8984 // Save buffer size (in sample frames).

8985 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );

8986 stream_.bufferSize = *bufferSize;

8988 // Set the sample rate.

8990 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );

8991 if ( result == -1 ) {

8993 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";

8994 errorText_ = errorStream_.str();

8998 // Verify the sample rate setup worked.

// Tolerate small hardware rounding (within 100 Hz of the request).
8999 if ( abs( srate - sampleRate ) > 100 ) {

9001 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";

9002 errorText_ = errorStream_.str();

9005 stream_.sampleRate = sampleRate;

9007 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {

9008 // We're doing duplex setup here.

// Same node reopened for duplex: output side inherits the input settings.
9009 stream_.deviceFormat[0] = stream_.deviceFormat[1];

9010 stream_.nDeviceChannels[0] = deviceChannels;

9013 // Set interleaving parameters.

9014 stream_.userInterleaved = true;

9015 stream_.deviceInterleaved[mode] = true;

9016 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )

9017 stream_.userInterleaved = false;

9019 // Set flags for buffer conversion

9020 stream_.doConvertBuffer[mode] = false;

9021 if ( stream_.userFormat != stream_.deviceFormat[mode] )

9022 stream_.doConvertBuffer[mode] = true;

9023 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )

9024 stream_.doConvertBuffer[mode] = true;

9025 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

9026 stream_.nUserChannels[mode] > 1 )

9027 stream_.doConvertBuffer[mode] = true;

9029 // Allocate the stream handles if necessary and then save.

9030 if ( stream_.apiHandle == 0 ) {

9032 handle = new OssHandle;

9034 catch ( std::bad_alloc& ) {

9035 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";

9039 if ( pthread_cond_init( &handle->runnable, NULL ) ) {

9040 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";

9044 stream_.apiHandle = (void *) handle;

9047 handle = (OssHandle *) stream_.apiHandle;

9049 handle->id[mode] = fd;

9051 // Allocate necessary internal buffers.

9052 unsigned long bufferBytes;

9053 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

9054 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

9055 if ( stream_.userBuffer[mode] == NULL ) {

9056 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";

9060 if ( stream_.doConvertBuffer[mode] ) {

9062 bool makeBuffer = true;

9063 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex, reuse the output-side device buffer if it is big enough.
9064 if ( mode == INPUT ) {

9065 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

9066 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

9067 if ( bufferBytes <= bytesOut ) makeBuffer = false;

9071 if ( makeBuffer ) {

9072 bufferBytes *= *bufferSize;

9073 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

9074 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

9075 if ( stream_.deviceBuffer == NULL ) {

9076 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";

9082 stream_.device[mode] = device;

9083 stream_.state = STREAM_STOPPED;

9085 // Setup the buffer conversion information structure.

9086 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

9088 // Setup thread if necessary.

9089 if ( stream_.mode == OUTPUT && mode == INPUT ) {

9090 // We had already set up an output stream.

9091 stream_.mode = DUPLEX;

9092 if ( stream_.device[0] == device ) handle->id[0] = fd;

9095 stream_.mode = mode;

9097 // Setup callback thread.

9098 stream_.callbackInfo.object = (void *) this;

9100 // Set the thread attributes for joinable and realtime scheduling

9101 // priority.  The higher priority will only take affect if the

9102 // program is run as root or suid.

9103 pthread_attr_t attr;

9104 pthread_attr_init( &attr );

9105 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );

9106 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)

// Clamp the requested priority to the SCHED_RR range before applying.
9107 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {

9108 struct sched_param param;

9109 int priority = options->priority;

9110 int min = sched_get_priority_min( SCHED_RR );

9111 int max = sched_get_priority_max( SCHED_RR );

9112 if ( priority < min ) priority = min;

9113 else if ( priority > max ) priority = max;

9114 param.sched_priority = priority;

9115 pthread_attr_setschedparam( &attr, &param );

9116 pthread_attr_setschedpolicy( &attr, SCHED_RR );

9119 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );

9121 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );

9124 stream_.callbackInfo.isRunning = true;

9125 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );

9126 pthread_attr_destroy( &attr );

9128 stream_.callbackInfo.isRunning = false;

9129 errorText_ = "RtApiOss::error creating callback thread!";

// ---- error cleanup path (label not visible in this listing) ----
9138 pthread_cond_destroy( &handle->runnable );

9139 if ( handle->id[0] ) close( handle->id[0] );

9140 if ( handle->id[1] ) close( handle->id[1] );

9142 stream_.apiHandle = 0;

9145 for ( int i=0; i<2; i++ ) {

9146 if ( stream_.userBuffer[i] ) {

9147 free( stream_.userBuffer[i] );

9148 stream_.userBuffer[i] = 0;

9152 if ( stream_.deviceBuffer ) {

9153 free( stream_.deviceBuffer );

9154 stream_.deviceBuffer = 0;
\r
// Close the OSS stream: join the callback thread, halt and close the device
// fds, free buffers, and reset stream state.
// NOTE(review): listing is lossy — some braces, the "else if" for input
// halt, and the "delete handle" are in the missing lines.
9160 void RtApiOss :: closeStream()

9162 if ( stream_.state == STREAM_CLOSED ) {

9163 errorText_ = "RtApiOss::closeStream(): no open stream to close!";

9164 error( RtAudioError::WARNING );

9168 OssHandle *handle = (OssHandle *) stream_.apiHandle;

// Signal the callback thread's loop to terminate, wake it if parked.
9169 stream_.callbackInfo.isRunning = false;

9170 MUTEX_LOCK( &stream_.mutex );

9171 if ( stream_.state == STREAM_STOPPED )

9172 pthread_cond_signal( &handle->runnable );

9173 MUTEX_UNLOCK( &stream_.mutex );

9174 pthread_join( stream_.callbackInfo.thread, NULL );

// If still running, halt the device(s): id[0] = output, id[1] = input.
9176 if ( stream_.state == STREAM_RUNNING ) {

9177 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )

9178 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );

9180 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );

9181 stream_.state = STREAM_STOPPED;

9185 pthread_cond_destroy( &handle->runnable );

9186 if ( handle->id[0] ) close( handle->id[0] );

9187 if ( handle->id[1] ) close( handle->id[1] );

9189 stream_.apiHandle = 0;

// Free user-side buffers for both directions.
9192 for ( int i=0; i<2; i++ ) {

9193 if ( stream_.userBuffer[i] ) {

9194 free( stream_.userBuffer[i] );

9195 stream_.userBuffer[i] = 0;

9199 if ( stream_.deviceBuffer ) {

9200 free( stream_.deviceBuffer );

9201 stream_.deviceBuffer = 0;

9204 stream_.mode = UNINITIALIZED;

9205 stream_.state = STREAM_CLOSED;
\r
// Start a stopped OSS stream: flag it RUNNING and wake the parked callback
// thread. OSS itself starts producing/consuming as soon as samples flow.
9208 void RtApiOss :: startStream()

9211 if ( stream_.state == STREAM_RUNNING ) {

9212 errorText_ = "RtApiOss::startStream(): the stream is already running!";

9213 error( RtAudioError::WARNING );

9217 MUTEX_LOCK( &stream_.mutex );

9219 stream_.state = STREAM_RUNNING;

9221 // No need to do anything else here ... OSS automatically starts

9222 // when fed samples.

9224 MUTEX_UNLOCK( &stream_.mutex );

// Wake the callback thread waiting on the handle's condition variable.
9226 OssHandle *handle = (OssHandle *) stream_.apiHandle;

9227 pthread_cond_signal( &handle->runnable );
\r
9230 void RtApiOss :: stopStream()
\r
9233 if ( stream_.state == STREAM_STOPPED ) {
\r
9234 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9235 error( RtAudioError::WARNING );
\r
9239 MUTEX_LOCK( &stream_.mutex );
\r
9241 // The state might change while waiting on a mutex.
\r
9242 if ( stream_.state == STREAM_STOPPED ) {
\r
9243 MUTEX_UNLOCK( &stream_.mutex );
\r
9248 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9249 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9251 // Flush the output with zeros a few times.
\r
9254 RtAudioFormat format;
\r
9256 if ( stream_.doConvertBuffer[0] ) {
\r
9257 buffer = stream_.deviceBuffer;
\r
9258 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9259 format = stream_.deviceFormat[0];
\r
9262 buffer = stream_.userBuffer[0];
\r
9263 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9264 format = stream_.userFormat;
\r
9267 memset( buffer, 0, samples * formatBytes(format) );
\r
9268 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9269 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9270 if ( result == -1 ) {
\r
9271 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9272 error( RtAudioError::WARNING );
\r
9276 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9277 if ( result == -1 ) {
\r
9278 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9279 errorText_ = errorStream_.str();
\r
9282 handle->triggered = false;
\r
9285 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9286 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9287 if ( result == -1 ) {
\r
9288 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9289 errorText_ = errorStream_.str();
\r
9295 stream_.state = STREAM_STOPPED;
\r
9296 MUTEX_UNLOCK( &stream_.mutex );
\r
9298 if ( result != -1 ) return;
\r
9299 error( RtAudioError::SYSTEM_ERROR );
\r
9302 void RtApiOss :: abortStream()
\r
9305 if ( stream_.state == STREAM_STOPPED ) {
\r
9306 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9307 error( RtAudioError::WARNING );
\r
9311 MUTEX_LOCK( &stream_.mutex );
\r
9313 // The state might change while waiting on a mutex.
\r
9314 if ( stream_.state == STREAM_STOPPED ) {
\r
9315 MUTEX_UNLOCK( &stream_.mutex );
\r
9320 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9321 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9322 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9323 if ( result == -1 ) {
\r
9324 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9325 errorText_ = errorStream_.str();
\r
9328 handle->triggered = false;
\r
9331 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9332 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9333 if ( result == -1 ) {
\r
9334 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9335 errorText_ = errorStream_.str();
\r
9341 stream_.state = STREAM_STOPPED;
\r
9342 MUTEX_UNLOCK( &stream_.mutex );
\r
9344 if ( result != -1 ) return;
\r
9345 error( RtAudioError::SYSTEM_ERROR );
\r
9348 void RtApiOss :: callbackEvent()
\r
9350 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9351 if ( stream_.state == STREAM_STOPPED ) {
\r
9352 MUTEX_LOCK( &stream_.mutex );
\r
9353 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9354 if ( stream_.state != STREAM_RUNNING ) {
\r
9355 MUTEX_UNLOCK( &stream_.mutex );
\r
9358 MUTEX_UNLOCK( &stream_.mutex );
\r
9361 if ( stream_.state == STREAM_CLOSED ) {
\r
9362 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9363 error( RtAudioError::WARNING );
\r
9367 // Invoke user callback to get fresh output data.
\r
9368 int doStopStream = 0;
\r
9369 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9370 double streamTime = getStreamTime();
\r
9371 RtAudioStreamStatus status = 0;
\r
9372 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9373 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9374 handle->xrun[0] = false;
\r
9376 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9377 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9378 handle->xrun[1] = false;
\r
9380 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9381 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9382 if ( doStopStream == 2 ) {
\r
9383 this->abortStream();
\r
9387 MUTEX_LOCK( &stream_.mutex );
\r
9389 // The state might change while waiting on a mutex.
\r
9390 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9395 RtAudioFormat format;
\r
9397 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9399 // Setup parameters and do buffer conversion if necessary.
\r
9400 if ( stream_.doConvertBuffer[0] ) {
\r
9401 buffer = stream_.deviceBuffer;
\r
9402 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9403 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9404 format = stream_.deviceFormat[0];
\r
9407 buffer = stream_.userBuffer[0];
\r
9408 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9409 format = stream_.userFormat;
\r
9412 // Do byte swapping if necessary.
\r
9413 if ( stream_.doByteSwap[0] )
\r
9414 byteSwapBuffer( buffer, samples, format );
\r
9416 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9418 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9419 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9420 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9421 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9422 handle->triggered = true;
\r
9425 // Write samples to device.
\r
9426 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9428 if ( result == -1 ) {
\r
9429 // We'll assume this is an underrun, though there isn't a
\r
9430 // specific means for determining that.
\r
9431 handle->xrun[0] = true;
\r
9432 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9433 error( RtAudioError::WARNING );
\r
9434 // Continue on to input section.
\r
9438 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9440 // Setup parameters.
\r
9441 if ( stream_.doConvertBuffer[1] ) {
\r
9442 buffer = stream_.deviceBuffer;
\r
9443 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9444 format = stream_.deviceFormat[1];
\r
9447 buffer = stream_.userBuffer[1];
\r
9448 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9449 format = stream_.userFormat;
\r
9452 // Read samples from device.
\r
9453 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9455 if ( result == -1 ) {
\r
9456 // We'll assume this is an overrun, though there isn't a
\r
9457 // specific means for determining that.
\r
9458 handle->xrun[1] = true;
\r
9459 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9460 error( RtAudioError::WARNING );
\r
9464 // Do byte swapping if necessary.
\r
9465 if ( stream_.doByteSwap[1] )
\r
9466 byteSwapBuffer( buffer, samples, format );
\r
9468 // Do buffer conversion if necessary.
\r
9469 if ( stream_.doConvertBuffer[1] )
\r
9470 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9474 MUTEX_UNLOCK( &stream_.mutex );
\r
9476 RtApi::tickStreamTime();
\r
9477 if ( doStopStream == 1 ) this->stopStream();
\r
9480 static void *ossCallbackHandler( void *ptr )
\r
9482 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9483 RtApiOss *object = (RtApiOss *) info->object;
\r
9484 bool *isRunning = &info->isRunning;
\r
9486 while ( *isRunning == true ) {
\r
9487 pthread_testcancel();
\r
9488 object->callbackEvent();
\r
9491 pthread_exit( NULL );
\r
9494 //******************** End of __LINUX_OSS__ *********************//
\r
9498 // *************************************************** //
\r
9500 // Protected common (OS-independent) RtAudio methods.
\r
9502 // *************************************************** //
\r
9504 // This method can be modified to control the behavior of error
\r
9505 // message printing.
\r
9506 void RtApi :: error( RtAudioError::Type type )
\r
9508 errorStream_.str(""); // clear the ostringstream
\r
9510 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9511 if ( errorCallback ) {
\r
9512 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9514 if ( firstErrorOccurred_ )
\r
9517 firstErrorOccurred_ = true;
\r
9518 const std::string errorMessage = errorText_;
\r
9520 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9521 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9525 errorCallback( type, errorMessage );
\r
9526 firstErrorOccurred_ = false;
\r
9530 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9531 std::cerr << '\n' << errorText_ << "\n\n";
\r
9532 else if ( type != RtAudioError::WARNING )
\r
9533 throw( RtAudioError( errorText_, type ) );
\r
9536 void RtApi :: verifyStream()
\r
9538 if ( stream_.state == STREAM_CLOSED ) {
\r
9539 errorText_ = "RtApi:: a stream is not open!";
\r
9540 error( RtAudioError::INVALID_USE );
\r
9544 void RtApi :: clearStreamInfo()
\r
9546 stream_.mode = UNINITIALIZED;
\r
9547 stream_.state = STREAM_CLOSED;
\r
9548 stream_.sampleRate = 0;
\r
9549 stream_.bufferSize = 0;
\r
9550 stream_.nBuffers = 0;
\r
9551 stream_.userFormat = 0;
\r
9552 stream_.userInterleaved = true;
\r
9553 stream_.streamTime = 0.0;
\r
9554 stream_.apiHandle = 0;
\r
9555 stream_.deviceBuffer = 0;
\r
9556 stream_.callbackInfo.callback = 0;
\r
9557 stream_.callbackInfo.userData = 0;
\r
9558 stream_.callbackInfo.isRunning = false;
\r
9559 stream_.callbackInfo.errorCallback = 0;
\r
9560 for ( int i=0; i<2; i++ ) {
\r
9561 stream_.device[i] = 11111;
\r
9562 stream_.doConvertBuffer[i] = false;
\r
9563 stream_.deviceInterleaved[i] = true;
\r
9564 stream_.doByteSwap[i] = false;
\r
9565 stream_.nUserChannels[i] = 0;
\r
9566 stream_.nDeviceChannels[i] = 0;
\r
9567 stream_.channelOffset[i] = 0;
\r
9568 stream_.deviceFormat[i] = 0;
\r
9569 stream_.latency[i] = 0;
\r
9570 stream_.userBuffer[i] = 0;
\r
9571 stream_.convertInfo[i].channels = 0;
\r
9572 stream_.convertInfo[i].inJump = 0;
\r
9573 stream_.convertInfo[i].outJump = 0;
\r
9574 stream_.convertInfo[i].inFormat = 0;
\r
9575 stream_.convertInfo[i].outFormat = 0;
\r
9576 stream_.convertInfo[i].inOffset.clear();
\r
9577 stream_.convertInfo[i].outOffset.clear();
\r
9581 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9583 if ( format == RTAUDIO_SINT16 )
\r
9585 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9587 else if ( format == RTAUDIO_FLOAT64 )
\r
9589 else if ( format == RTAUDIO_SINT24 )
\r
9591 else if ( format == RTAUDIO_SINT8 )
\r
9594 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9595 error( RtAudioError::WARNING );
\r
9600 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9602 if ( mode == INPUT ) { // convert device to user buffer
\r
9603 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9604 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9605 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9606 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9608 else { // convert user to device buffer
\r
9609 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9610 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9611 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9612 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9615 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9616 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9618 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9620 // Set up the interleave/deinterleave offsets.
\r
9621 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9622 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9623 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9624 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9625 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9626 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9627 stream_.convertInfo[mode].inJump = 1;
\r
9631 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9632 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9633 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9634 stream_.convertInfo[mode].outJump = 1;
\r
9638 else { // no (de)interleaving
\r
9639 if ( stream_.userInterleaved ) {
\r
9640 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9641 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9642 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9646 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9647 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9648 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9649 stream_.convertInfo[mode].inJump = 1;
\r
9650 stream_.convertInfo[mode].outJump = 1;
\r
9655 // Add channel offset.
\r
9656 if ( firstChannel > 0 ) {
\r
9657 if ( stream_.deviceInterleaved[mode] ) {
\r
9658 if ( mode == OUTPUT ) {
\r
9659 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9660 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9663 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9664 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9668 if ( mode == OUTPUT ) {
\r
9669 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9670 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9673 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9674 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9680 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9682 // This function does format conversion, input/output channel compensation, and
\r
9683 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9684 // the lower three bytes of a 32-bit integer.
\r
9686 // Clear our device buffer when in/out duplex device channels are different
\r
9687 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9688 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9689 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9692 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9694 Float64 *out = (Float64 *)outBuffer;
\r
9696 if (info.inFormat == RTAUDIO_SINT8) {
\r
9697 signed char *in = (signed char *)inBuffer;
\r
9698 scale = 1.0 / 127.5;
\r
9699 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9700 for (j=0; j<info.channels; j++) {
\r
9701 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9702 out[info.outOffset[j]] += 0.5;
\r
9703 out[info.outOffset[j]] *= scale;
\r
9705 in += info.inJump;
\r
9706 out += info.outJump;
\r
9709 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9710 Int16 *in = (Int16 *)inBuffer;
\r
9711 scale = 1.0 / 32767.5;
\r
9712 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9713 for (j=0; j<info.channels; j++) {
\r
9714 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9715 out[info.outOffset[j]] += 0.5;
\r
9716 out[info.outOffset[j]] *= scale;
\r
9718 in += info.inJump;
\r
9719 out += info.outJump;
\r
9722 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9723 Int24 *in = (Int24 *)inBuffer;
\r
9724 scale = 1.0 / 8388607.5;
\r
9725 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9726 for (j=0; j<info.channels; j++) {
\r
9727 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9728 out[info.outOffset[j]] += 0.5;
\r
9729 out[info.outOffset[j]] *= scale;
\r
9731 in += info.inJump;
\r
9732 out += info.outJump;
\r
9735 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9736 Int32 *in = (Int32 *)inBuffer;
\r
9737 scale = 1.0 / 2147483647.5;
\r
9738 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9739 for (j=0; j<info.channels; j++) {
\r
9740 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9741 out[info.outOffset[j]] += 0.5;
\r
9742 out[info.outOffset[j]] *= scale;
\r
9744 in += info.inJump;
\r
9745 out += info.outJump;
\r
9748 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9749 Float32 *in = (Float32 *)inBuffer;
\r
9750 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9751 for (j=0; j<info.channels; j++) {
\r
9752 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9754 in += info.inJump;
\r
9755 out += info.outJump;
\r
9758 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9759 // Channel compensation and/or (de)interleaving only.
\r
9760 Float64 *in = (Float64 *)inBuffer;
\r
9761 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9762 for (j=0; j<info.channels; j++) {
\r
9763 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9765 in += info.inJump;
\r
9766 out += info.outJump;
\r
9770 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9772 Float32 *out = (Float32 *)outBuffer;
\r
9774 if (info.inFormat == RTAUDIO_SINT8) {
\r
9775 signed char *in = (signed char *)inBuffer;
\r
9776 scale = (Float32) ( 1.0 / 127.5 );
\r
9777 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9778 for (j=0; j<info.channels; j++) {
\r
9779 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9780 out[info.outOffset[j]] += 0.5;
\r
9781 out[info.outOffset[j]] *= scale;
\r
9783 in += info.inJump;
\r
9784 out += info.outJump;
\r
9787 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9788 Int16 *in = (Int16 *)inBuffer;
\r
9789 scale = (Float32) ( 1.0 / 32767.5 );
\r
9790 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9791 for (j=0; j<info.channels; j++) {
\r
9792 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9793 out[info.outOffset[j]] += 0.5;
\r
9794 out[info.outOffset[j]] *= scale;
\r
9796 in += info.inJump;
\r
9797 out += info.outJump;
\r
9800 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9801 Int24 *in = (Int24 *)inBuffer;
\r
9802 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9803 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9804 for (j=0; j<info.channels; j++) {
\r
9805 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9806 out[info.outOffset[j]] += 0.5;
\r
9807 out[info.outOffset[j]] *= scale;
\r
9809 in += info.inJump;
\r
9810 out += info.outJump;
\r
9813 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9814 Int32 *in = (Int32 *)inBuffer;
\r
9815 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9816 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9817 for (j=0; j<info.channels; j++) {
\r
9818 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9819 out[info.outOffset[j]] += 0.5;
\r
9820 out[info.outOffset[j]] *= scale;
\r
9822 in += info.inJump;
\r
9823 out += info.outJump;
\r
9826 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9827 // Channel compensation and/or (de)interleaving only.
\r
9828 Float32 *in = (Float32 *)inBuffer;
\r
9829 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9830 for (j=0; j<info.channels; j++) {
\r
9831 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9833 in += info.inJump;
\r
9834 out += info.outJump;
\r
9837 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9838 Float64 *in = (Float64 *)inBuffer;
\r
9839 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9840 for (j=0; j<info.channels; j++) {
\r
9841 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9843 in += info.inJump;
\r
9844 out += info.outJump;
\r
9848 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9849 Int32 *out = (Int32 *)outBuffer;
\r
9850 if (info.inFormat == RTAUDIO_SINT8) {
\r
9851 signed char *in = (signed char *)inBuffer;
\r
9852 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9853 for (j=0; j<info.channels; j++) {
\r
9854 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9855 out[info.outOffset[j]] <<= 24;
\r
9857 in += info.inJump;
\r
9858 out += info.outJump;
\r
9861 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9862 Int16 *in = (Int16 *)inBuffer;
\r
9863 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9864 for (j=0; j<info.channels; j++) {
\r
9865 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9866 out[info.outOffset[j]] <<= 16;
\r
9868 in += info.inJump;
\r
9869 out += info.outJump;
\r
9872 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9873 Int24 *in = (Int24 *)inBuffer;
\r
9874 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9875 for (j=0; j<info.channels; j++) {
\r
9876 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9877 out[info.outOffset[j]] <<= 8;
\r
9879 in += info.inJump;
\r
9880 out += info.outJump;
\r
9883 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9884 // Channel compensation and/or (de)interleaving only.
\r
9885 Int32 *in = (Int32 *)inBuffer;
\r
9886 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9887 for (j=0; j<info.channels; j++) {
\r
9888 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9890 in += info.inJump;
\r
9891 out += info.outJump;
\r
9894 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9895 Float32 *in = (Float32 *)inBuffer;
\r
9896 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9897 for (j=0; j<info.channels; j++) {
\r
9898 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9900 in += info.inJump;
\r
9901 out += info.outJump;
\r
9904 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9905 Float64 *in = (Float64 *)inBuffer;
\r
9906 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9907 for (j=0; j<info.channels; j++) {
\r
9908 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9910 in += info.inJump;
\r
9911 out += info.outJump;
\r
9915 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9916 Int24 *out = (Int24 *)outBuffer;
\r
9917 if (info.inFormat == RTAUDIO_SINT8) {
\r
9918 signed char *in = (signed char *)inBuffer;
\r
9919 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9920 for (j=0; j<info.channels; j++) {
\r
9921 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9922 //out[info.outOffset[j]] <<= 16;
\r
9924 in += info.inJump;
\r
9925 out += info.outJump;
\r
9928 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9929 Int16 *in = (Int16 *)inBuffer;
\r
9930 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9931 for (j=0; j<info.channels; j++) {
\r
9932 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9933 //out[info.outOffset[j]] <<= 8;
\r
9935 in += info.inJump;
\r
9936 out += info.outJump;
\r
9939 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9940 // Channel compensation and/or (de)interleaving only.
\r
9941 Int24 *in = (Int24 *)inBuffer;
\r
9942 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9943 for (j=0; j<info.channels; j++) {
\r
9944 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9946 in += info.inJump;
\r
9947 out += info.outJump;
\r
9950 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9951 Int32 *in = (Int32 *)inBuffer;
\r
9952 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9953 for (j=0; j<info.channels; j++) {
\r
9954 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9955 //out[info.outOffset[j]] >>= 8;
\r
9957 in += info.inJump;
\r
9958 out += info.outJump;
\r
9961 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9962 Float32 *in = (Float32 *)inBuffer;
\r
9963 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9964 for (j=0; j<info.channels; j++) {
\r
9965 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9967 in += info.inJump;
\r
9968 out += info.outJump;
\r
9971 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9972 Float64 *in = (Float64 *)inBuffer;
\r
9973 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9974 for (j=0; j<info.channels; j++) {
\r
9975 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9977 in += info.inJump;
\r
9978 out += info.outJump;
\r
9982 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9983 Int16 *out = (Int16 *)outBuffer;
\r
9984 if (info.inFormat == RTAUDIO_SINT8) {
\r
9985 signed char *in = (signed char *)inBuffer;
\r
9986 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9987 for (j=0; j<info.channels; j++) {
\r
9988 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9989 out[info.outOffset[j]] <<= 8;
\r
9991 in += info.inJump;
\r
9992 out += info.outJump;
\r
9995 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9996 // Channel compensation and/or (de)interleaving only.
\r
9997 Int16 *in = (Int16 *)inBuffer;
\r
9998 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9999 for (j=0; j<info.channels; j++) {
\r
10000 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10002 in += info.inJump;
\r
10003 out += info.outJump;
\r
10006 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10007 Int24 *in = (Int24 *)inBuffer;
\r
10008 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10009 for (j=0; j<info.channels; j++) {
\r
10010 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
10012 in += info.inJump;
\r
10013 out += info.outJump;
\r
10016 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10017 Int32 *in = (Int32 *)inBuffer;
\r
10018 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10019 for (j=0; j<info.channels; j++) {
\r
10020 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
10022 in += info.inJump;
\r
10023 out += info.outJump;
\r
10026 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10027 Float32 *in = (Float32 *)inBuffer;
\r
10028 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10029 for (j=0; j<info.channels; j++) {
\r
10030 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10032 in += info.inJump;
\r
10033 out += info.outJump;
\r
10036 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10037 Float64 *in = (Float64 *)inBuffer;
\r
10038 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10039 for (j=0; j<info.channels; j++) {
\r
10040 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
10042 in += info.inJump;
\r
10043 out += info.outJump;
\r
10047 else if (info.outFormat == RTAUDIO_SINT8) {
\r
10048 signed char *out = (signed char *)outBuffer;
\r
10049 if (info.inFormat == RTAUDIO_SINT8) {
\r
10050 // Channel compensation and/or (de)interleaving only.
\r
10051 signed char *in = (signed char *)inBuffer;
\r
10052 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10053 for (j=0; j<info.channels; j++) {
\r
10054 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
10056 in += info.inJump;
\r
10057 out += info.outJump;
\r
10060 if (info.inFormat == RTAUDIO_SINT16) {
\r
10061 Int16 *in = (Int16 *)inBuffer;
\r
10062 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10063 for (j=0; j<info.channels; j++) {
\r
10064 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10066 in += info.inJump;
\r
10067 out += info.outJump;
\r
10070 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10071 Int24 *in = (Int24 *)inBuffer;
\r
10072 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10073 for (j=0; j<info.channels; j++) {
\r
10074 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10076 in += info.inJump;
\r
10077 out += info.outJump;
\r
10080 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10081 Int32 *in = (Int32 *)inBuffer;
\r
10082 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10083 for (j=0; j<info.channels; j++) {
\r
10084 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10086 in += info.inJump;
\r
10087 out += info.outJump;
\r
10090 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10091 Float32 *in = (Float32 *)inBuffer;
\r
10092 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10093 for (j=0; j<info.channels; j++) {
\r
10094 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10096 in += info.inJump;
\r
10097 out += info.outJump;
\r
10100 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10101 Float64 *in = (Float64 *)inBuffer;
\r
10102 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10103 for (j=0; j<info.channels; j++) {
\r
10104 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10106 in += info.inJump;
\r
10107 out += info.outJump;
\r
10113 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10114 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10115 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10117 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10119 register char val;
\r
10120 register char *ptr;
\r
10123 if ( format == RTAUDIO_SINT16 ) {
\r
10124 for ( unsigned int i=0; i<samples; i++ ) {
\r
10125 // Swap 1st and 2nd bytes.
\r
10127 *(ptr) = *(ptr+1);
\r
10130 // Increment 2 bytes.
\r
10134 else if ( format == RTAUDIO_SINT32 ||
\r
10135 format == RTAUDIO_FLOAT32 ) {
\r
10136 for ( unsigned int i=0; i<samples; i++ ) {
\r
10137 // Swap 1st and 4th bytes.
\r
10139 *(ptr) = *(ptr+3);
\r
10142 // Swap 2nd and 3rd bytes.
\r
10145 *(ptr) = *(ptr+1);
\r
10148 // Increment 3 more bytes.
\r
10152 else if ( format == RTAUDIO_SINT24 ) {
\r
10153 for ( unsigned int i=0; i<samples; i++ ) {
\r
10154 // Swap 1st and 3rd bytes.
\r
10156 *(ptr) = *(ptr+2);
\r
10159 // Increment 2 more bytes.
\r
10163 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10164 for ( unsigned int i=0; i<samples; i++ ) {
\r
10165 // Swap 1st and 8th bytes
\r
10167 *(ptr) = *(ptr+7);
\r
10170 // Swap 2nd and 7th bytes
\r
10173 *(ptr) = *(ptr+5);
\r
10176 // Swap 3rd and 6th bytes
\r
10179 *(ptr) = *(ptr+3);
\r
10182 // Swap 4th and 5th bytes
\r
10185 *(ptr) = *(ptr+1);
\r
10188 // Increment 5 more bytes.
\r
10194 // Indentation settings for Vim and Emacs
\r
10196 // Local Variables:
\r
10197 // c-basic-offset: 2
\r
10198 // indent-tabs-mode: nil
\r
10201 // vim: et sts=2 sw=2
\r